-rw-r--r--Documentation/ABI/testing/sysfs-kernel-mm-memory-tiers25
-rw-r--r--Documentation/accounting/delay-accounting.rst2
-rw-r--r--Documentation/admin-guide/cgroup-v1/memory.rst4
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt14
-rw-r--r--Documentation/admin-guide/mm/cma_debugfs.rst10
-rw-r--r--Documentation/admin-guide/mm/damon/index.rst6
-rw-r--r--Documentation/admin-guide/mm/damon/start.rst13
-rw-r--r--Documentation/admin-guide/mm/damon/usage.rst5
-rw-r--r--Documentation/admin-guide/mm/index.rst1
-rw-r--r--Documentation/admin-guide/mm/ksm.rst36
-rw-r--r--Documentation/admin-guide/mm/multigen_lru.rst162
-rw-r--r--Documentation/admin-guide/mm/transhuge.rst16
-rw-r--r--Documentation/admin-guide/mm/userfaultfd.rst41
-rw-r--r--Documentation/admin-guide/sysctl/kernel.rst11
-rw-r--r--Documentation/admin-guide/sysctl/vm.rst3
-rw-r--r--Documentation/core-api/index.rst1
-rw-r--r--Documentation/core-api/maple_tree.rst217
-rw-r--r--Documentation/core-api/mm-api.rst3
-rw-r--r--Documentation/dev-tools/index.rst1
-rw-r--r--Documentation/dev-tools/kasan.rst17
-rw-r--r--Documentation/dev-tools/kmsan.rst427
-rw-r--r--Documentation/mm/index.rst1
-rw-r--r--Documentation/mm/ksm.rst2
-rw-r--r--Documentation/mm/multigen_lru.rst159
-rw-r--r--Documentation/mm/page_owner.rst5
-rw-r--r--MAINTAINERS27
-rw-r--r--Makefile1
-rw-r--r--arch/Kconfig8
-rw-r--r--arch/alpha/include/uapi/asm/mman.h2
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig2
-rw-r--r--arch/arm/configs/milbeaut_m10v_defconfig2
-rw-r--r--arch/arm/configs/oxnas_v6_defconfig2
-rw-r--r--arch/arm/configs/pxa_defconfig2
-rw-r--r--arch/arm/configs/sama7_defconfig2
-rw-r--r--arch/arm/configs/sp7021_defconfig2
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/include/asm/pgtable.h15
-rw-r--r--arch/arm64/kernel/elfcore.c16
-rw-r--r--arch/arm64/kernel/vdso.c3
-rw-r--r--arch/arm64/mm/hugetlbpage.c2
-rw-r--r--arch/csky/Kconfig2
-rw-r--r--arch/ia64/Kconfig2
-rw-r--r--arch/ia64/include/asm/sparsemem.h6
-rw-r--r--arch/loongarch/Kconfig2
-rw-r--r--arch/m68k/Kconfig.cpu2
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/configs/db1xxx_defconfig1
-rw-r--r--arch/mips/configs/generic_defconfig1
-rw-r--r--arch/mips/include/uapi/asm/mman.h2
-rw-r--r--arch/nios2/Kconfig2
-rw-r--r--arch/parisc/include/uapi/asm/mman.h2
-rw-r--r--arch/parisc/kernel/cache.c9
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/configs/85xx/ge_imp3a_defconfig2
-rw-r--r--arch/powerpc/configs/fsl-emb-nonhw.config2
-rw-r--r--arch/powerpc/configs/powernv_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/kernel/vdso.c6
-rw-r--r--arch/powerpc/mm/book3s32/tlb.c11
-rw-r--r--arch/powerpc/mm/book3s64/subpage_prot.c13
-rw-r--r--arch/riscv/kernel/vdso.c3
-rw-r--r--arch/s390/kernel/vdso.c3
-rw-r--r--arch/s390/lib/uaccess.c3
-rw-r--r--arch/s390/mm/gmap.c6
-rw-r--r--arch/s390/mm/hugetlbpage.c10
-rw-r--r--arch/sh/configs/ecovec24_defconfig2
-rw-r--r--arch/sh/configs/sdk7786_defconfig1
-rw-r--r--arch/sh/configs/urquell_defconfig1
-rw-r--r--arch/sh/mm/Kconfig2
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/um/kernel/tlb.c14
-rw-r--r--arch/x86/Kconfig10
-rw-r--r--arch/x86/boot/Makefile1
-rw-r--r--arch/x86/boot/compressed/Makefile1
-rw-r--r--arch/x86/entry/vdso/Makefile3
-rw-r--r--arch/x86/entry/vdso/vma.c9
-rw-r--r--arch/x86/include/asm/checksum.h16
-rw-r--r--arch/x86/include/asm/kmsan.h87
-rw-r--r--arch/x86/include/asm/page_64.h7
-rw-r--r--arch/x86/include/asm/pgtable-3level.h8
-rw-r--r--arch/x86/include/asm/pgtable.h9
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h47
-rw-r--r--arch/x86/include/asm/sparsemem.h2
-rw-r--r--arch/x86/include/asm/string_64.h23
-rw-r--r--arch/x86/include/asm/uaccess.h22
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/cpu/Makefile1
-rw-r--r--arch/x86/kernel/dumpstack.c6
-rw-r--r--arch/x86/kernel/process_64.c1
-rw-r--r--arch/x86/kernel/tboot.c2
-rw-r--r--arch/x86/kernel/unwind_frame.c11
-rw-r--r--arch/x86/lib/Makefile2
-rw-r--r--arch/x86/lib/iomem.c5
-rw-r--r--arch/x86/mm/Makefile5
-rw-r--r--arch/x86/mm/fault.c23
-rw-r--r--arch/x86/mm/init.c2
-rw-r--r--arch/x86/mm/init_64.c2
-rw-r--r--arch/x86/mm/ioremap.c3
-rw-r--r--arch/x86/mm/kmsan_shadow.c20
-rw-r--r--arch/x86/mm/pgtable.c5
-rw-r--r--arch/x86/realmode/rm/Makefile1
-rw-r--r--arch/xtensa/Kconfig2
-rw-r--r--arch/xtensa/include/uapi/asm/mman.h2
-rw-r--r--arch/xtensa/kernel/syscall.c18
-rw-r--r--block/bio.c2
-rw-r--r--block/blk.h7
-rw-r--r--crypto/Kconfig2
-rw-r--r--drivers/base/memory.c6
-rw-r--r--drivers/base/node.c141
-rw-r--r--drivers/block/zram/zram_drv.c38
-rw-r--r--drivers/block/zram/zram_drv.h15
-rw-r--r--drivers/dax/kmem.c42
-rw-r--r--drivers/firmware/efi/efi.c2
-rw-r--r--drivers/firmware/efi/libstub/Makefile1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c14
-rw-r--r--drivers/input/serio/libps2.c5
-rw-r--r--drivers/misc/cxl/fault.c45
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/nvdimm/nd.h2
-rw-r--r--drivers/nvdimm/pfn_devs.c2
-rw-r--r--drivers/tee/optee/call.c18
-rw-r--r--drivers/usb/core/urb.c2
-rw-r--r--drivers/virtio/virtio_ring.c10
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/btrfs/compression.c31
-rw-r--r--fs/btrfs/extent_io.c33
-rw-r--r--fs/btrfs/subpage.c2
-rw-r--r--fs/btrfs/tests/extent-io-tests.c32
-rw-r--r--fs/buffer.c158
-rw-r--r--fs/coredump.c34
-rw-r--r--fs/exec.c14
-rw-r--r--fs/ext2/balloc.c7
-rw-r--r--fs/fuse/dev.c3
-rw-r--r--fs/gfs2/meta_io.c7
-rw-r--r--fs/gfs2/quota.c8
-rw-r--r--fs/hugetlbfs/inode.c300
-rw-r--r--fs/isofs/compress.c2
-rw-r--r--fs/jbd2/journal.c15
-rw-r--r--fs/jbd2/recovery.c16
-rw-r--r--fs/namei.c2
-rw-r--r--fs/nilfs2/page.c45
-rw-r--r--fs/ntfs3/inode.c7
-rw-r--r--fs/ocfs2/aops.c2
-rw-r--r--fs/ocfs2/super.c4
-rw-r--r--fs/posix_acl.c3
-rw-r--r--fs/proc/base.c20
-rw-r--r--fs/proc/internal.h2
-rw-r--r--fs/proc/task_mmu.c96
-rw-r--r--fs/proc/task_nommu.c45
-rw-r--r--fs/ramfs/file-nommu.c50
-rw-r--r--fs/reiserfs/journal.c11
-rw-r--r--fs/reiserfs/stree.c4
-rw-r--r--fs/reiserfs/super.c4
-rw-r--r--fs/udf/dir.c2
-rw-r--r--fs/udf/directory.c2
-rw-r--r--fs/udf/inode.c8
-rw-r--r--fs/ufs/balloc.c12
-rw-r--r--fs/userfaultfd.c133
-rw-r--r--include/asm-generic/cacheflush.h14
-rw-r--r--include/linux/buffer_head.h48
-rw-r--r--include/linux/cache.h13
-rw-r--r--include/linux/cgroup.h15
-rw-r--r--include/linux/compiler-clang.h23
-rw-r--r--include/linux/compiler-gcc.h6
-rw-r--r--include/linux/compiler_types.h3
-rw-r--r--include/linux/damon.h86
-rw-r--r--include/linux/delayacct.h16
-rw-r--r--include/linux/fortify-string.h2
-rw-r--r--include/linux/gfp.h26
-rw-r--r--include/linux/highmem.h3
-rw-r--r--include/linux/huge_mm.h28
-rw-r--r--include/linux/hugetlb.h63
-rw-r--r--include/linux/hugetlb_cgroup.h19
-rw-r--r--include/linux/instrumented.h59
-rw-r--r--include/linux/kasan.h55
-rw-r--r--include/linux/khugepaged.h13
-rw-r--r--include/linux/kmsan-checks.h83
-rw-r--r--include/linux/kmsan.h330
-rw-r--r--include/linux/kmsan_types.h35
-rw-r--r--include/linux/ksm.h3
-rw-r--r--include/linux/maple_tree.h685
-rw-r--r--include/linux/memcontrol.h99
-rw-r--r--include/linux/memory-tiers.h102
-rw-r--r--include/linux/memory_hotplug.h30
-rw-r--r--include/linux/mempolicy.h13
-rw-r--r--include/linux/migrate.h15
-rw-r--r--include/linux/mm.h161
-rw-r--r--include/linux/mm_inline.h242
-rw-r--r--include/linux/mm_types.h178
-rw-r--r--include/linux/mm_types_task.h12
-rw-r--r--include/linux/mmzone.h281
-rw-r--r--include/linux/node.h29
-rw-r--r--include/linux/nodemask.h14
-rw-r--r--include/linux/oom.h11
-rw-r--r--include/linux/page-flags-layout.h16
-rw-r--r--include/linux/page-flags.h4
-rw-r--r--include/linux/page_counter.h26
-rw-r--r--include/linux/page_ext.h24
-rw-r--r--include/linux/page_idle.h34
-rw-r--r--include/linux/pageblock-flags.h4
-rw-r--r--include/linux/pagemap.h14
-rw-r--r--include/linux/pagewalk.h10
-rw-r--r--include/linux/pgtable.h26
-rw-r--r--include/linux/rmap.h73
-rw-r--r--include/linux/sched.h14
-rw-r--r--include/linux/sched/coredump.h7
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/shmem_fs.h16
-rw-r--r--include/linux/slab.h8
-rw-r--r--include/linux/stackdepot.h8
-rw-r--r--include/linux/swap.h41
-rw-r--r--include/linux/swap_cgroup.h4
-rw-r--r--include/linux/swapfile.h7
-rw-r--r--include/linux/swapops.h150
-rw-r--r--include/linux/uaccess.h19
-rw-r--r--include/linux/userfaultfd_k.h7
-rw-r--r--include/linux/vm_event_item.h4
-rw-r--r--include/linux/vmacache.h28
-rw-r--r--include/linux/vmstat.h6
-rw-r--r--include/linux/writeback.h8
-rw-r--r--include/trace/events/huge_memory.h37
-rw-r--r--include/trace/events/maple_tree.h123
-rw-r--r--include/trace/events/mmap.h73
-rw-r--r--include/uapi/asm-generic/hugetlb_encode.h26
-rw-r--r--include/uapi/asm-generic/mman-common.h2
-rw-r--r--include/uapi/linux/userfaultfd.h4
-rw-r--r--init/Kconfig5
-rw-r--r--init/main.c11
-rw-r--r--ipc/shm.c21
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/acct.c11
-rw-r--r--kernel/bounds.c7
-rw-r--r--kernel/bpf/core.c2
-rw-r--r--kernel/bpf/task_iter.c10
-rw-r--r--kernel/cgroup/cgroup-internal.h1
-rw-r--r--kernel/debug/debug_core.c12
-rw-r--r--kernel/delayacct.c13
-rw-r--r--kernel/dma/mapping.c10
-rw-r--r--kernel/entry/common.c5
-rw-r--r--kernel/events/core.c3
-rw-r--r--kernel/events/uprobes.c39
-rw-r--r--kernel/exit.c3
-rw-r--r--kernel/fork.c79
-rw-r--r--kernel/kcov.c7
-rw-r--r--kernel/locking/Makefile3
-rw-r--r--kernel/sched/core.c15
-rw-r--r--kernel/sched/debug.c1
-rw-r--r--kernel/sched/fair.c175
-rw-r--r--kernel/sched/sched.h1
-rw-r--r--kernel/sysctl.c8
-rw-r--r--lib/Kconfig.debug18
-rw-r--r--lib/Kconfig.kasan8
-rw-r--r--lib/Kconfig.kmsan62
-rw-r--r--lib/Makefile10
-rw-r--r--lib/iomap.c44
-rw-r--r--lib/iov_iter.c9
-rw-r--r--lib/maple_tree.c7130
-rw-r--r--lib/show_mem.c4
-rw-r--r--lib/stackdepot.c29
-rw-r--r--lib/string.c8
-rw-r--r--lib/test_hmm.c13
-rw-r--r--lib/test_maple_tree.c38307
-rw-r--r--lib/usercopy.c3
-rw-r--r--mm/Kconfig34
-rw-r--r--mm/Makefile8
-rw-r--r--mm/backing-dev.c6
-rw-r--r--mm/cma_debug.c5
-rw-r--r--mm/compaction.c24
-rw-r--r--mm/damon/Kconfig3
-rw-r--r--mm/damon/core-test.h29
-rw-r--r--mm/damon/core.c281
-rw-r--r--mm/damon/dbgfs.c52
-rw-r--r--mm/damon/lru_sort.c380
-rw-r--r--mm/damon/modules-common.h46
-rw-r--r--mm/damon/ops-common.c50
-rw-r--r--mm/damon/ops-common.h2
-rw-r--r--mm/damon/paddr.c43
-rw-r--r--mm/damon/reclaim.c289
-rw-r--r--mm/damon/sysfs.c143
-rw-r--r--mm/damon/vaddr-test.h36
-rw-r--r--mm/damon/vaddr.c102
-rw-r--r--mm/debug.c14
-rw-r--r--mm/filemap.c124
-rw-r--r--mm/folio-compat.c6
-rw-r--r--mm/gup.c248
-rw-r--r--mm/hmm.c2
-rw-r--r--mm/huge_memory.c205
-rw-r--r--mm/hugetlb.c792
-rw-r--r--mm/hugetlb_cgroup.c27
-rw-r--r--mm/hugetlb_vmemmap.c10
-rw-r--r--mm/hwpoison-inject.c4
-rw-r--r--mm/init-mm.c4
-rw-r--r--mm/internal.h48
-rw-r--r--mm/kasan/Makefile8
-rw-r--r--mm/kasan/common.c177
-rw-r--r--mm/kasan/generic.c154
-rw-r--r--mm/kasan/hw_tags.c39
-rw-r--r--mm/kasan/kasan.h171
-rw-r--r--mm/kasan/kasan_test.c (renamed from lib/test_kasan.c)26
-rw-r--r--mm/kasan/kasan_test_module.c (renamed from lib/test_kasan_module.c)2
-rw-r--r--mm/kasan/report.c138
-rw-r--r--mm/kasan/report_generic.c46
-rw-r--r--mm/kasan/report_tags.c123
-rw-r--r--mm/kasan/sw_tags.c5
-rw-r--r--mm/kasan/tags.c143
-rw-r--r--mm/kfence/core.c22
-rw-r--r--mm/khugepaged.c1168
-rw-r--r--mm/kmemleak.c21
-rw-r--r--mm/kmsan/Makefile28
-rw-r--r--mm/kmsan/core.c450
-rw-r--r--mm/kmsan/hooks.c384
-rw-r--r--mm/kmsan/init.c235
-rw-r--r--mm/kmsan/instrumentation.c307
-rw-r--r--mm/kmsan/kmsan.h209
-rw-r--r--mm/kmsan/kmsan_test.c581
-rw-r--r--mm/kmsan/report.c219
-rw-r--r--mm/kmsan/shadow.c298
-rw-r--r--mm/ksm.c375
-rw-r--r--mm/madvise.c60
-rw-r--r--mm/memblock.c6
-rw-r--r--mm/memcontrol.c281
-rw-r--r--mm/memory-failure.c126
-rw-r--r--mm/memory-tiers.c732
-rw-r--r--mm/memory.c274
-rw-r--r--mm/memory_hotplug.c11
-rw-r--r--mm/mempolicy.c66
-rw-r--r--mm/memremap.c2
-rw-r--r--mm/migrate.c651
-rw-r--r--mm/migrate_device.c19
-rw-r--r--mm/mlock.c37
-rw-r--r--mm/mm_init.c6
-rw-r--r--mm/mm_slot.h55
-rw-r--r--mm/mmap.c2259
-rw-r--r--mm/mmzone.c2
-rw-r--r--mm/mprotect.c17
-rw-r--r--mm/mremap.c41
-rw-r--r--mm/msync.c2
-rw-r--r--mm/nommu.c260
-rw-r--r--mm/oom_kill.c11
-rw-r--r--mm/page_alloc.c196
-rw-r--r--mm/page_counter.c15
-rw-r--r--mm/page_ext.c117
-rw-r--r--mm/page_io.c43
-rw-r--r--mm/page_isolation.c17
-rw-r--r--mm/page_owner.c103
-rw-r--r--mm/page_table_check.c14
-rw-r--r--mm/page_vma_mapped.c6
-rw-r--r--mm/pagewalk.c12
-rw-r--r--mm/rmap.c184
-rw-r--r--mm/rodata_test.c8
-rw-r--r--mm/secretmem.c8
-rw-r--r--mm/shmem.c429
-rw-r--r--mm/shuffle.c21
-rw-r--r--mm/slab.h1
-rw-r--r--mm/slab_common.c2
-rw-r--r--mm/slub.c41
-rw-r--r--mm/swap.c73
-rw-r--r--mm/swap.h18
-rw-r--r--mm/swap_cgroup.c6
-rw-r--r--mm/swap_slots.c2
-rw-r--r--mm/swap_state.c111
-rw-r--r--mm/swapfile.c178
-rw-r--r--mm/truncate.c2
-rw-r--r--mm/userfaultfd.c28
-rw-r--r--mm/util.c36
-rw-r--r--mm/vmacache.c117
-rw-r--r--mm/vmalloc.c35
-rw-r--r--mm/vmscan.c3336
-rw-r--r--mm/vmstat.c9
-rw-r--r--mm/workingset.c110
-rw-r--r--mm/zsmalloc.c22
-rw-r--r--mm/zswap.c2
-rw-r--r--scripts/Makefile.kmsan8
-rw-r--r--scripts/Makefile.lib9
-rw-r--r--security/Kconfig.hardening4
-rw-r--r--tools/include/asm-generic/hugetlb_encode.h26
-rw-r--r--tools/include/linux/slab.h4
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h2
-rw-r--r--tools/objtool/check.c20
-rw-r--r--tools/testing/memblock/linux/mmzone.h2
-rw-r--r--tools/testing/radix-tree/.gitignore2
-rw-r--r--tools/testing/radix-tree/Makefile9
-rw-r--r--tools/testing/radix-tree/generated/autoconf.h1
-rw-r--r--tools/testing/radix-tree/linux.c160
-rw-r--r--tools/testing/radix-tree/linux/kernel.h1
-rw-r--r--tools/testing/radix-tree/linux/lockdep.h2
-rw-r--r--tools/testing/radix-tree/linux/maple_tree.h7
-rw-r--r--tools/testing/radix-tree/maple.c59
-rw-r--r--tools/testing/radix-tree/trace/events/maple_tree.h5
-rw-r--r--tools/testing/selftests/cgroup/config1
-rw-r--r--tools/testing/selftests/damon/Makefile1
-rw-r--r--tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh27
-rw-r--r--tools/testing/selftests/vm/.gitignore1
-rw-r--r--tools/testing/selftests/vm/Makefile23
-rw-r--r--tools/testing/selftests/vm/check_config.sh31
-rw-r--r--tools/testing/selftests/vm/hmm-tests.c108
-rw-r--r--tools/testing/selftests/vm/khugepaged.c1195
-rw-r--r--tools/testing/selftests/vm/mremap_test.c49
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests.sh15
-rw-r--r--tools/testing/selftests/vm/soft-dirty.c2
-rw-r--r--tools/testing/selftests/vm/split_huge_page_test.c12
-rwxr-xr-xtools/testing/selftests/vm/test_hmm.sh10
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c247
-rw-r--r--tools/testing/selftests/vm/vm_util.c36
-rw-r--r--tools/testing/selftests/vm/vm_util.h5
-rw-r--r--tools/vm/page_owner_sort.c7
409 files changed, 65682 insertions, 7924 deletions
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-memory-tiers b/Documentation/ABI/testing/sysfs-kernel-mm-memory-tiers
new file mode 100644
index 000000000000..45985e411f13
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-memory-tiers
@@ -0,0 +1,25 @@
+What: /sys/devices/virtual/memory_tiering/
+Date: August 2022
+Contact: Linux memory management mailing list <linux-mm@kvack.org>
+Description: A collection of all the memory tiers allocated.
+
+ Individual memory tier details are contained in subdirectories
+ named by the abstract distance of the memory tier.
+
+ /sys/devices/virtual/memory_tiering/memory_tierN/
+
+
+What: /sys/devices/virtual/memory_tiering/memory_tierN/
+ /sys/devices/virtual/memory_tiering/memory_tierN/nodes
+Date: August 2022
+Contact: Linux memory management mailing list <linux-mm@kvack.org>
+Description: Directory with details of a specific memory tier
+
+ This is the directory containing information about a particular
+ memory tier, memtierN, where N is derived based on abstract distance.
+
+ A smaller value of N implies a higher (faster) memory tier in the
+ hierarchy.
+
+ nodes: NUMA nodes that are part of this memory tier.
+
diff --git a/Documentation/accounting/delay-accounting.rst b/Documentation/accounting/delay-accounting.rst
index 241d1a87f2cd..7103b62ba6d7 100644
--- a/Documentation/accounting/delay-accounting.rst
+++ b/Documentation/accounting/delay-accounting.rst
@@ -13,7 +13,7 @@ a) waiting for a CPU (while being runnable)
b) completion of synchronous block I/O initiated by the task
c) swapping in pages
d) memory reclaim
-e) thrashing page cache
+e) thrashing
f) direct compact
g) write-protect copy
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index 2cc502a75ef6..5b86245450bd 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -299,7 +299,7 @@ Per-node-per-memcgroup LRU (cgroup's private LRU) is guarded by
lruvec->lru_lock; PG_lru bit of page->flags is cleared before
isolating a page from its LRU under lruvec->lru_lock.
-2.7 Kernel Memory Extension (CONFIG_MEMCG_KMEM)
+2.7 Kernel Memory Extension
-----------------------------------------------
With the Kernel memory extension, the Memory Controller is able to limit
@@ -386,8 +386,6 @@ U != 0, K >= U:
a. Enable CONFIG_CGROUPS
b. Enable CONFIG_MEMCG
-c. Enable CONFIG_MEMCG_SWAP (to use swap extension)
-d. Enable CONFIG_MEMCG_KMEM (to use kmem extension)
3.1. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?)
-------------------------------------------------------------------
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 1b3565b61b65..69b1533c1f02 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1469,6 +1469,14 @@
Permit 'security.evm' to be updated regardless of
current integrity status.
+ early_page_ext [KNL] Enforces page_ext initialization to earlier
+ stages so as to cover more early boot allocations.
+ Please note that as a side effect some optimizations
+ might be disabled to achieve that (e.g. parallelized
+ memory initialization is disabled) so the boot process
+ might take longer, especially on systems with a lot of
+ memory. Available with CONFIG_PAGE_EXTENSION=y.
+
failslab=
fail_usercopy=
fail_page_alloc=
@@ -6041,12 +6049,6 @@
This parameter controls use of the Protected
Execution Facility on pSeries.
- swapaccount= [KNL]
- Format: [0|1]
- Enable accounting of swap in memory resource
- controller if no parameter or 1 is given or disable
- it if 0 is given (See Documentation/admin-guide/cgroup-v1/memory.rst)
-
swiotlb= [ARM,IA-64,PPC,MIPS,X86]
Format: { <int> [,<int>] | force | noforce }
<int> -- Number of I/O TLB slabs
diff --git a/Documentation/admin-guide/mm/cma_debugfs.rst b/Documentation/admin-guide/mm/cma_debugfs.rst
index 4e06ffabd78a..7367e6294ef6 100644
--- a/Documentation/admin-guide/mm/cma_debugfs.rst
+++ b/Documentation/admin-guide/mm/cma_debugfs.rst
@@ -5,10 +5,10 @@ CMA Debugfs Interface
The CMA debugfs interface is useful to retrieve basic information out of the
different CMA areas and to test allocation/release in each of the areas.
-Each CMA zone represents a directory under <debugfs>/cma/, indexed by the
-kernel's CMA index. So the first CMA zone would be:
+Each CMA area is represented by a directory under <debugfs>/cma/, named
+after its CMA name, like below:
- <debugfs>/cma/cma-0
+ <debugfs>/cma/<cma_name>
The structure of the files created under that directory is as follows:
@@ -18,8 +18,8 @@ The structure of the files created under that directory is as follows:
- [RO] bitmap: The bitmap of page states in the zone.
- [WO] alloc: Allocate N pages from that CMA area. For example::
- echo 5 > <debugfs>/cma/cma-2/alloc
+ echo 5 > <debugfs>/cma/<cma_name>/alloc
-would try to allocate 5 pages from the cma-2 area.
+would try to allocate 5 pages from the 'cma_name' area.
- [WO] free: Free N pages from that CMA area, similar to the above.
diff --git a/Documentation/admin-guide/mm/damon/index.rst b/Documentation/admin-guide/mm/damon/index.rst
index 05500042f777..33d37bb2fb4e 100644
--- a/Documentation/admin-guide/mm/damon/index.rst
+++ b/Documentation/admin-guide/mm/damon/index.rst
@@ -1,8 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0
-========================
-Monitoring Data Accesses
-========================
+==========================
+DAMON: Data Access MONitor
+==========================
:doc:`DAMON </mm/damon/index>` allows light-weight data access monitoring.
Using DAMON, users can analyze the memory access patterns of their systems and
diff --git a/Documentation/admin-guide/mm/damon/start.rst b/Documentation/admin-guide/mm/damon/start.rst
index 4d5ca2c46288..9f88afc734da 100644
--- a/Documentation/admin-guide/mm/damon/start.rst
+++ b/Documentation/admin-guide/mm/damon/start.rst
@@ -29,16 +29,9 @@ called DAMON Operator (DAMO). It is available at
https://github.com/awslabs/damo. The examples below assume that ``damo`` is on
your ``$PATH``. It's not mandatory, though.
-Because DAMO is using the debugfs interface (refer to :doc:`usage` for the
-detail) of DAMON, you should ensure debugfs is mounted. Mount it manually as
-below::
-
- # mount -t debugfs none /sys/kernel/debug/
-
-or append the following line to your ``/etc/fstab`` file so that your system
-can automatically mount debugfs upon booting::
-
- debugfs /sys/kernel/debug debugfs defaults 0 0
+Because DAMO is using the sysfs interface (refer to :doc:`usage` for the
+detail) of DAMON, you should ensure :doc:`sysfs </filesystems/sysfs>` is
+mounted.
Recording Data Access Patterns
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index ca91ecc29078..b47b0cbbd491 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -393,6 +393,11 @@ the files as above. Above is only for an example.
debugfs Interface
=================
+.. note::
+
+ The DAMON debugfs interface will be removed after the next LTS kernel is released, so
+ users should move to the :ref:`sysfs interface <sysfs_interface>`.
+
DAMON exports eight files, ``attrs``, ``target_ids``, ``init_regions``,
``schemes``, ``monitor_on``, ``kdamond_pid``, ``mk_contexts`` and
``rm_contexts`` under its debugfs directory, ``<debugfs>/damon/``.
diff --git a/Documentation/admin-guide/mm/index.rst b/Documentation/admin-guide/mm/index.rst
index 1bd11118dfb1..d1064e0ba34a 100644
--- a/Documentation/admin-guide/mm/index.rst
+++ b/Documentation/admin-guide/mm/index.rst
@@ -32,6 +32,7 @@ the Linux memory management.
idle_page_tracking
ksm
memory-hotplug
+ multigen_lru
nommu-mmap
numa_memory_policy
numaperf
diff --git a/Documentation/admin-guide/mm/ksm.rst b/Documentation/admin-guide/mm/ksm.rst
index b244f0202a03..fb6ba2002a4b 100644
--- a/Documentation/admin-guide/mm/ksm.rst
+++ b/Documentation/admin-guide/mm/ksm.rst
@@ -184,6 +184,42 @@ The maximum possible ``pages_sharing/pages_shared`` ratio is limited by the
``max_page_sharing`` tunable. To increase the ratio ``max_page_sharing`` must
be increased accordingly.
+Monitoring KSM profit
+=====================
+
+KSM can save memory by merging identical pages, but it can also consume
+additional memory, because it needs to generate a number of rmap_items to
+save each scanned page's brief rmap information. Some of these pages may
+be merged, but some may never be merged even after being checked several
+times; the memory consumed for those pages is unprofitable.
+
+1) How to determine whether KSM saves or consumes memory system-wide? Here
+ is a simple approximate calculation for reference::
+
+ general_profit =~ pages_sharing * sizeof(page) - (all_rmap_items) *
+ sizeof(rmap_item);
+
+ where all_rmap_items can be easily obtained by summing ``pages_sharing``,
+ ``pages_shared``, ``pages_unshared`` and ``pages_volatile``.
+
+2) The KSM profit within a single process can be similarly obtained by the
+ following approximate calculation::
+
+ process_profit =~ ksm_merging_pages * sizeof(page) -
+ ksm_rmap_items * sizeof(rmap_item).
+
+ where ksm_merging_pages is shown under the directory ``/proc/<pid>/``,
+ and ksm_rmap_items is shown in ``/proc/<pid>/ksm_stat``.
+
+From the perspective of the application, a high ratio of ``ksm_rmap_items`` to
+``ksm_merging_pages`` means a poorly chosen madvise policy, so developers or
+administrators have to rethink how to change the madvise policy. As a reference
+example, a page is usually 4K in size, and an rmap_item is 32B on 32-bit CPU
+architectures and 64B on 64-bit CPU architectures. So if the
+``ksm_rmap_items/ksm_merging_pages`` ratio exceeds 64 on a 64-bit CPU or
+exceeds 128 on a 32-bit CPU, then the app's madvise policy should be dropped,
+because the KSM profit is approximately zero or negative.
+
Monitoring KSM events
=====================
diff --git a/Documentation/admin-guide/mm/multigen_lru.rst b/Documentation/admin-guide/mm/multigen_lru.rst
new file mode 100644
index 000000000000..33e068830497
--- /dev/null
+++ b/Documentation/admin-guide/mm/multigen_lru.rst
@@ -0,0 +1,162 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+Multi-Gen LRU
+=============
+The multi-gen LRU is an alternative LRU implementation that optimizes
+page reclaim and improves performance under memory pressure. Page
+reclaim decides the kernel's caching policy and ability to overcommit
+memory. It directly impacts the kswapd CPU usage and RAM efficiency.
+
+Quick start
+===========
+Build the kernel with the following configurations.
+
+* ``CONFIG_LRU_GEN=y``
+* ``CONFIG_LRU_GEN_ENABLED=y``
+
+All set!
+
+Runtime options
+===============
+``/sys/kernel/mm/lru_gen/`` contains stable ABIs described in the
+following subsections.
+
+Kill switch
+-----------
+``enabled`` accepts different values to enable or disable the
+following components. Its default value depends on
+``CONFIG_LRU_GEN_ENABLED``. All the components should be enabled
+unless some of them have unforeseen side effects. Writing to
+``enabled`` has no effect when a component is not supported by the
+hardware, and valid values will be accepted even when the main switch
+is off.
+
+====== ===============================================================
+Values Components
+====== ===============================================================
+0x0001 The main switch for the multi-gen LRU.
+0x0002 Clearing the accessed bit in leaf page table entries in large
+ batches, when MMU sets it (e.g., on x86). This behavior can
+ theoretically worsen lock contention (mmap_lock). If it is
+ disabled, the multi-gen LRU will suffer a minor performance
+ degradation for workloads that contiguously map hot pages,
+ whose accessed bits can be otherwise cleared by fewer larger
+ batches.
+0x0004 Clearing the accessed bit in non-leaf page table entries as
+ well, when MMU sets it (e.g., on x86). This behavior was not
+ verified on x86 varieties other than Intel and AMD. If it is
+ disabled, the multi-gen LRU will suffer a negligible
+ performance degradation.
+[yYnN] Apply to all the components above.
+====== ===============================================================
+
+E.g.,
+::
+
+ echo y >/sys/kernel/mm/lru_gen/enabled
+ cat /sys/kernel/mm/lru_gen/enabled
+ 0x0007
+ echo 5 >/sys/kernel/mm/lru_gen/enabled
+ cat /sys/kernel/mm/lru_gen/enabled
+ 0x0005
+
+Thrashing prevention
+--------------------
+Personal computers are more sensitive to thrashing because it can
+cause janks (lags when rendering UI) and negatively impact user
+experience. The multi-gen LRU offers thrashing prevention to the
+majority of laptop and desktop users who do not have ``oomd``.
+
+Users can write ``N`` to ``min_ttl_ms`` to prevent the working set of
+``N`` milliseconds from getting evicted. The OOM killer is triggered
+if this working set cannot be kept in memory. In other words, this
+option works as an adjustable pressure relief valve, and when open, it
+terminates applications that are hopefully not being used.
+
+Based on the average human detectable lag (~100ms), ``N=1000`` usually
+eliminates intolerable janks due to thrashing. Larger values like
+``N=3000`` make janks less noticeable at the risk of premature OOM
+kills.
+
+The default value ``0`` means disabled.
+
+Experimental features
+=====================
+``/sys/kernel/debug/lru_gen`` accepts commands described in the
+following subsections. Multiple command lines are supported, as is
+concatenation with delimiters ``,`` and ``;``.
+
+``/sys/kernel/debug/lru_gen_full`` provides additional stats for
+debugging. ``CONFIG_LRU_GEN_STATS=y`` keeps historical stats from
+evicted generations in this file.
+
+Working set estimation
+----------------------
+Working set estimation measures how much memory an application needs
+in a given time interval, and it is usually done with little impact on
+the performance of the application. E.g., data centers want to
+optimize job scheduling (bin packing) to improve memory utilization.
+When a new job comes in, the job scheduler needs to find out whether
+each server it manages can allocate a certain amount of memory for
+this new job before it can pick a candidate. To do so, the job
+scheduler needs to estimate the working sets of the existing jobs.
+
+When it is read, ``lru_gen`` returns a histogram of numbers of pages
+accessed over different time intervals for each memcg and node.
+``MAX_NR_GENS`` decides the number of bins for each histogram. The
+histograms are noncumulative.
+::
+
+ memcg memcg_id memcg_path
+ node node_id
+ min_gen_nr age_in_ms nr_anon_pages nr_file_pages
+ ...
+ max_gen_nr age_in_ms nr_anon_pages nr_file_pages
+
+Each bin contains an estimated number of pages that have been accessed
+within ``age_in_ms``. E.g., ``min_gen_nr`` contains the coldest pages
+and ``max_gen_nr`` contains the hottest pages, since ``age_in_ms`` of
+the former is the largest and that of the latter is the smallest.
+
+Users can write the following command to ``lru_gen`` to create a new
+generation ``max_gen_nr+1``:
+
+ ``+ memcg_id node_id max_gen_nr [can_swap [force_scan]]``
+
+``can_swap`` defaults to the swap setting and, if it is set to ``1``,
+it forces the scan of anon pages when swap is off, and vice versa.
+``force_scan`` defaults to ``1`` and, if it is set to ``0``, it
+employs heuristics to reduce the overhead, which is likely to reduce
+the coverage as well.
+
+A typical use case is that a job scheduler runs this command at a
+certain time interval to create new generations, and it ranks the
+servers it manages based on the sizes of their cold pages defined by
+this time interval.
+
+Proactive reclaim
+-----------------
+Proactive reclaim induces page reclaim when there is no memory
+pressure. It usually targets cold pages only. E.g., when a new job
+comes in, the job scheduler wants to proactively reclaim cold pages on
+the server it selected, to improve the chance of successfully landing
+this new job.
+
+Users can write the following command to ``lru_gen`` to evict
+generations less than or equal to ``min_gen_nr``.
+
+ ``- memcg_id node_id min_gen_nr [swappiness [nr_to_reclaim]]``
+
+``min_gen_nr`` should be less than ``max_gen_nr-1``, since
+``max_gen_nr`` and ``max_gen_nr-1`` are not fully aged (equivalent to
+the active list) and therefore cannot be evicted. ``swappiness``
+overrides the default value in ``/proc/sys/vm/swappiness``.
+``nr_to_reclaim`` limits the number of pages to evict.
+
+A typical use case is that a job scheduler runs this command before it
+tries to land a new job on a server. If it fails to materialize enough
+cold pages because of the overestimation, it retries on the next
+server according to the ranking result obtained from the working set
+estimation step. This less forceful approach limits the impacts on the
+existing jobs.
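+
+As a rough, purely illustrative sketch (the memcg ID, node ID and generation
+numbers below are made up), a job scheduler could drive both commands above
+from C like this::
+
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		int fd = open("/sys/kernel/debug/lru_gen", O_WRONLY);
+
+		if (fd < 0)
+			return 1;
+		/* Working set estimation: age memcg 1 on node 0. */
+		dprintf(fd, "+ 1 0 7 1 1\n");
+		/* Proactive reclaim: evict generations <= 5 on the same node. */
+		dprintf(fd, "- 1 0 5\n");
+		close(fd);
+		return 0;
+	}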
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index c9c37f16eef8..8ee78ec232eb 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -191,7 +191,14 @@ allocation failure to throttle the next allocation attempt::
/sys/kernel/mm/transparent_hugepage/khugepaged/alloc_sleep_millisecs
-The khugepaged progress can be seen in the number of pages collapsed::
+The khugepaged progress can be seen in the number of pages collapsed (note
+that this counter may not be an exact count of the number of pages
+collapsed, since "collapsed" could mean multiple things: (1) A PTE mapping
+being replaced by a PMD mapping, or (2) All 4K physical pages replaced by
+one 2M hugepage. Each may happen independently, or together, depending on
+the type of memory and the failures that occur. As such, this value should
+be interpreted roughly as a sign of progress, and counters in /proc/vmstat
+consulted for more accurate accounting)::
/sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
@@ -366,10 +373,9 @@ thp_split_pmd
page table entry.
thp_zero_page_alloc
- is incremented every time a huge zero page is
- successfully allocated. It includes allocations which where
- dropped due race with other allocation. Note, it doesn't count
- every map of the huge zero page, only its allocation.
+ is incremented every time a huge zero page used for thp is
+ successfully allocated. Note, it doesn't count every map of
+ the huge zero page, only its allocation.
thp_zero_page_alloc_failed
is incremented if kernel fails to allocate
diff --git a/Documentation/admin-guide/mm/userfaultfd.rst b/Documentation/admin-guide/mm/userfaultfd.rst
index 6528036093e1..83f31919ebb3 100644
--- a/Documentation/admin-guide/mm/userfaultfd.rst
+++ b/Documentation/admin-guide/mm/userfaultfd.rst
@@ -17,7 +17,10 @@ of the ``PROT_NONE+SIGSEGV`` trick.
Design
======
-Userfaults are delivered and resolved through the ``userfaultfd`` syscall.
+Userspace creates a new userfaultfd, initializes it, and registers one or more
+regions of virtual memory with it. Then, any page faults which occur within the
+region(s) result in a message being delivered to the userfaultfd, notifying
+userspace of the fault.
The ``userfaultfd`` (aside from registering and unregistering virtual
memory ranges) provides two primary functionalities:
@@ -34,12 +37,11 @@ The real advantage of userfaults if compared to regular virtual memory
management of mremap/mprotect is that the userfaults in all their
operations never involve heavyweight structures like vmas (in fact the
``userfaultfd`` runtime load never takes the mmap_lock for writing).
-
Vmas are not suitable for page- (or hugepage) granular fault tracking
when dealing with virtual address spaces that could span
Terabytes. Too many vmas would be needed for that.
-The ``userfaultfd`` once opened by invoking the syscall, can also be
+The ``userfaultfd``, once created, can also be
passed using unix domain sockets to a manager process, so the same
manager process could handle the userfaults of a multitude of
different processes without them being aware about what is going on
@@ -50,6 +52,39 @@ is a corner case that would currently return ``-EBUSY``).
API
===
+Creating a userfaultfd
+----------------------
+
+There are two ways to create a new userfaultfd, each of which provides ways to
+restrict access to this functionality (since historically userfaultfds which
+handle kernel page faults have been a useful tool for exploiting the kernel).
+
+The first way, supported since userfaultfd was introduced, is the
+userfaultfd(2) syscall. Access to this is controlled in several ways:
+
+- Any user can always create a userfaultfd which traps userspace page faults
+ only. Such a userfaultfd can be created using the userfaultfd(2) syscall
+ with the flag UFFD_USER_MODE_ONLY.
+
+- In order to also trap kernel page faults for the address space, either the
+ process needs the CAP_SYS_PTRACE capability, or the system must have
+ vm.unprivileged_userfaultfd set to 1. By default, vm.unprivileged_userfaultfd
+ is set to 0.
+
+The second way, added to the kernel more recently, is by opening
+/dev/userfaultfd and issuing a USERFAULTFD_IOC_NEW ioctl to it. This method
+yields equivalent userfaultfds to the userfaultfd(2) syscall.
+
+Unlike userfaultfd(2), access to /dev/userfaultfd is controlled via normal
+filesystem permissions (user/group/mode), which gives fine-grained access to
+userfaultfd specifically, without also granting other unrelated privileges at
+the same time (as e.g. granting CAP_SYS_PTRACE would do). Users who have access
+to /dev/userfaultfd can always create userfaultfds that trap kernel page faults;
+vm.unprivileged_userfaultfd is not considered.
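+
+A minimal sketch of both creation paths (error handling omitted; the flag
+combinations shown are only examples)::
+
+	#include <fcntl.h>
+	#include <sys/ioctl.h>
+	#include <sys/syscall.h>
+	#include <unistd.h>
+	#include <linux/userfaultfd.h>
+
+	/* 1) The userfaultfd(2) syscall, restricted to userspace faults. */
+	int uffd = syscall(__NR_userfaultfd,
+			   O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
+
+	/* 2) The /dev/userfaultfd device node. */
+	int dev = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
+	int uffd2 = ioctl(dev, USERFAULTFD_IOC_NEW, O_CLOEXEC | O_NONBLOCK);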
+
+Initializing a userfaultfd
+--------------------------
+
When first opened the ``userfaultfd`` must be enabled invoking the
``UFFDIO_API`` ioctl specifying a ``uffdio_api.api`` value set to ``UFFD_API`` (or
a later API version) which will specify the ``read/POLLIN`` protocol
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index ee6572b1edad..835c8844bba4 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -635,6 +635,17 @@ different types of memory (represented as different NUMA nodes) to
place the hot pages in the fast memory. This is implemented based on
unmapping and page fault too.
+numa_balancing_promote_rate_limit_MBps
+======================================
+
+An overly high promotion/demotion throughput between different memory types
+may hurt application latency. This can be used to rate limit the
+promotion throughput. The per-node max promotion throughput in MB/s
+will be limited to be no more than the set value.
+
+A rule of thumb is to set this to less than 1/10 of the PMEM node
+write bandwidth.
+
oops_all_cpu_backtrace
======================
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index 9b833e439f09..988f6a4c8084 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -926,6 +926,9 @@ calls without any restrictions.
The default value is 0.
+Another way to control permissions for userfaultfd is to use
+/dev/userfaultfd instead of userfaultfd(2). See
+Documentation/admin-guide/mm/userfaultfd.rst.
user_reserve_kbytes
===================
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index b0e7b4771fff..77eb775b8b42 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -37,6 +37,7 @@ Library functionality that is used throughout the kernel.
kref
assoc_array
xarray
+ maple_tree
idr
circular-buffers
rbtree
diff --git a/Documentation/core-api/maple_tree.rst b/Documentation/core-api/maple_tree.rst
new file mode 100644
index 000000000000..45defcf15da7
--- /dev/null
+++ b/Documentation/core-api/maple_tree.rst
@@ -0,0 +1,217 @@
+.. SPDX-License-Identifier: GPL-2.0+
+
+
+==========
+Maple Tree
+==========
+
+:Author: Liam R. Howlett
+
+Overview
+========
+
+The Maple Tree is a B-Tree data type which is optimized for storing
+non-overlapping ranges, including ranges of size 1. The tree was designed to
+be simple to use and does not require a user written search method. It
+supports iterating over a range of entries and going to the previous or next
+entry in a cache-efficient manner. The tree can also be put into an RCU-safe
+mode of operation which allows reading and writing concurrently. Writers must
+synchronize on a lock, which can be the default spinlock, or the user can set
+the lock to an external lock of a different type.
+
+The Maple Tree maintains a small memory footprint and was designed to use
+modern processor cache efficiently. The majority of the users will be able to
+use the normal API. An :ref:`maple-tree-advanced-api` exists for more complex
+scenarios. The most important usage of the Maple Tree is the tracking of the
+virtual memory areas.
+
+The Maple Tree can store values between ``0`` and ``ULONG_MAX``. The Maple
+Tree reserves values with the bottom two bits set to '10' which are below 4096
+(ie 2, 6, 10 .. 4094) for internal use. If the entries may use reserved
+entries then the users can convert the entries using xa_mk_value() and convert
+them back by calling xa_to_value(). If the user needs to use a reserved
+value, then the user can convert the value when using the
+:ref:`maple-tree-advanced-api`, but are blocked by the normal API.
+
+The Maple Tree can also be configured to support searching for a gap of a given
+size (or larger).
+
+Pre-allocation of nodes is also supported using the
+:ref:`maple-tree-advanced-api`. This is useful for users who must guarantee a
+successful store operation within a given
+code segment when allocating cannot be done. Allocations of nodes are
+relatively small at around 256 bytes.
+
+.. _maple-tree-normal-api:
+
+Normal API
+==========
+
+Start by initialising a maple tree, either with DEFINE_MTREE() for statically
+allocated maple trees or mt_init() for dynamically allocated ones. A
+freshly-initialised maple tree contains a ``NULL`` pointer for the range ``0``
+- ``ULONG_MAX``. There are currently two types of maple trees supported: the
+allocation tree and the regular tree. The regular tree has a higher branching
+factor for internal nodes. The allocation tree has a lower branching factor
+but allows the user to search for a gap of a given size or larger from either
+``0`` upwards or ``ULONG_MAX`` down. An allocation tree can be used by
+passing in the ``MT_FLAGS_ALLOC_RANGE`` flag when initialising the tree.
+
+You can then set entries using mtree_store() or mtree_store_range().
+mtree_store() will overwrite any entry with the new entry and return 0 on
+success or an error code otherwise. mtree_store_range() works in the same way
+but takes a range. mtree_load() is used to retrieve the entry stored at a
+given index. You can use mtree_erase() to erase an entire range by only
+knowing one value within that range, or a mtree_store() call with an entry of
+NULL may be used to partially erase a range or many ranges at once.
+
+If you want to only store a new entry to a range (or index) if that range is
+currently ``NULL``, you can use mtree_insert_range() or mtree_insert() which
+return -EEXIST if the range is not empty.
+
+You can search for an entry from an index upwards by using mt_find().
+
+You can walk each entry within a range by calling mt_for_each(). You must
+provide a temporary variable to store a cursor. If you want to walk each
+element of the tree then ``0`` and ``ULONG_MAX`` may be used as the range. If
+the caller is going to hold the lock for the duration of the walk then it is
+worth looking at the mas_for_each() API in the :ref:`maple-tree-advanced-api`
+section.
+
+Sometimes it is necessary to ensure the next call to store to a maple tree does
+not allocate memory; please see :ref:`maple-tree-advanced-api` for this use case.
+
+Finally, you can remove all entries from a maple tree by calling
+mtree_destroy(). If the maple tree entries are pointers, you may wish to free
+the entries first.
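+
+A short sketch of the normal API (``my_object`` is assumed to be a previously
+allocated pointer)::
+
+	DEFINE_MTREE(mt);
+	void *entry;
+	unsigned long index = 0;
+	int ret;
+
+	/* Store one entry covering the range [10, 19]. */
+	ret = mtree_store_range(&mt, 10, 19, my_object, GFP_KERNEL);
+
+	/* Look up one index, then search upwards from index 0. */
+	entry = mtree_load(&mt, 15);
+	entry = mt_find(&mt, &index, ULONG_MAX);
+
+	/* Erase the whole range by naming any index inside it. */
+	entry = mtree_erase(&mt, 12);
+
+	mtree_destroy(&mt);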
+
+Allocating Nodes
+----------------
+
+The allocations are handled by the internal tree code. See
+:ref:`maple-tree-advanced-alloc` for other options.
+
+Locking
+-------
+
+You do not have to worry about locking. See :ref:`maple-tree-advanced-locks`
+for other options.
+
+The Maple Tree uses RCU and an internal spinlock to synchronise access:
+
+Takes RCU read lock:
+ * mtree_load()
+ * mt_find()
+ * mt_for_each()
+ * mt_next()
+ * mt_prev()
+
+Takes ma_lock internally:
+ * mtree_store()
+ * mtree_store_range()
+ * mtree_insert()
+ * mtree_insert_range()
+ * mtree_erase()
+ * mtree_destroy()
+ * mt_set_in_rcu()
+ * mt_clear_in_rcu()
+
+If you want to take advantage of the internal lock to protect the data
+structures that you are storing in the Maple Tree, you can call mtree_lock()
+before calling mtree_load(), then take a reference count on the object you
+have found before calling mtree_unlock(). This will prevent stores from
+removing the object from the tree between looking up the object and
+incrementing the refcount. You can also use RCU to avoid dereferencing
+freed memory, but an explanation of that is beyond the scope of this
+document.
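+
+A minimal sketch of the lock-then-refcount pattern (``my_obj_get()`` is a
+made-up refcounting helper)::
+
+	void *entry;
+
+	mtree_lock(&mt);
+	entry = mtree_load(&mt, 15);
+	if (entry)
+		my_obj_get(entry);	/* pin the object before dropping the lock */
+	mtree_unlock(&mt);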
+
+.. _maple-tree-advanced-api:
+
+Advanced API
+============
+
+The advanced API offers more flexibility and better performance at the
+cost of an interface which can be harder to use and has fewer safeguards.
+You must take care of your own locking while using the advanced API.
+You can use the ma_lock, RCU or an external lock for protection.
+You can mix advanced and normal operations on the same array, as long
+as the locking is compatible. The :ref:`maple-tree-normal-api` is implemented
+in terms of the advanced API.
+
+The advanced API is based around the ma_state; this is where the 'mas'
+prefix originates. The ma_state struct keeps track of tree operations to make
+life easier for both internal and external tree users.
+
+Initialising the maple tree is the same as in the :ref:`maple-tree-normal-api`.
+Please see above.
+
+The maple state keeps track of the range start and end in mas->index and
+mas->last, respectively.
+
+mas_walk() will walk the tree to the location of mas->index and set the
+mas->index and mas->last according to the range for the entry.
+
+You can set entries using mas_store(). mas_store() will overwrite any entry
+with the new entry and return the first existing entry that is overwritten.
+The range is passed in as members of the maple state: index and last.
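+
+For instance, a sketch of a single ranged store using the mas_store_gfp()
+variant, which also handles node allocation (``my_object`` is assumed to be a
+previously allocated pointer)::
+
+	MA_STATE(mas, &mt, 10, 19);	/* index = 10, last = 19 */
+	int ret;
+
+	mas_lock(&mas);
+	ret = mas_store_gfp(&mas, my_object, GFP_KERNEL);
+	mas_unlock(&mas);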
+
+You can use mas_erase() to erase an entire range by setting index and
+last of the maple state to the desired range to erase. This will erase
+the first range that is found in that range, set the maple state index
+and last as the range that was erased and return the entry that existed
+at that location.
+
+You can walk each entry within a range by using mas_for_each(). If you want
+to walk each element of the tree then ``0`` and ``ULONG_MAX`` may be used as
+the range. If the lock needs to be periodically dropped, see the locking
+section mas_pause().
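+
+For example, a minimal sketch of a locked walk over the whole tree::
+
+	MA_STATE(mas, &mt, 0, 0);
+	void *entry;
+
+	mas_lock(&mas);
+	mas_for_each(&mas, entry, ULONG_MAX)
+		pr_info("[%lx, %lx] -> %p\n", mas.index, mas.last, entry);
+	mas_unlock(&mas);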
+
+Using a maple state allows mas_next() and mas_prev() to function as if the
+tree was a linked list. With such a high branching factor the amortized
+performance penalty is outweighed by cache optimization. mas_next() will
+return the next entry which occurs after the entry at index. mas_prev()
+will return the previous entry which occurs before the entry at index.
+
+mas_find() will find the first entry which exists at or above index on
+the first call, and the next entry on every subsequent call.
+
+mas_find_rev() will find the first entry which exists at or below the last on
+the first call, and the previous entry on every subsequent call.
+
+If the user needs to yield the lock during an operation, then the maple state
+must be paused using mas_pause().
+
+There are a few extra interfaces provided when using an allocation tree.
+If you wish to search for a gap within a range, then mas_empty_area()
+or mas_empty_area_rev() can be used. mas_empty_area() searches for a gap
+starting at the lowest index given up to the maximum of the range.
+mas_empty_area_rev() searches for a gap starting at the highest index given
+and continues downward to the lower bound of the range.
+
+.. _maple-tree-advanced-alloc:
+
+Advanced Allocating Nodes
+-------------------------
+
+Allocations are usually handled internally to the tree; however, if allocations
+need to occur before a write occurs, then calling mas_expected_entries() will
+allocate the worst-case number of needed nodes to insert the provided number of
+ranges. This also causes the tree to enter mass insertion mode. Once
+insertions are complete calling mas_destroy() on the maple state will free the
+unused allocations.
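+
+A rough sketch of that pattern (``nr_ranges`` is the caller's expected number
+of stores; locking is assumed to be handled by the caller)::
+
+	MA_STATE(mas, &mt, 0, 0);
+	int ret;
+
+	ret = mas_expected_entries(&mas, nr_ranges);
+	if (ret)
+		return ret;
+
+	/* ... perform the stores with mas_store() ... */
+
+	mas_destroy(&mas);	/* free any unused pre-allocated nodes */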
+
+.. _maple-tree-advanced-locks:
+
+Advanced Locking
+----------------
+
+The maple tree uses a spinlock by default, but external locks can be used for
+tree updates as well. To use an external lock, the tree must be initialized
+with the ``MT_FLAGS_LOCK_EXTERN`` flag; this is usually done with the
+MTREE_INIT_EXT() #define, which takes an external lock as an argument.
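+
+For example, a minimal sketch of a tree protected by an external spinlock::
+
+	static DEFINE_SPINLOCK(my_lock);
+	static struct maple_tree mt =
+		MTREE_INIT_EXT(mt, MT_FLAGS_LOCK_EXTERN, my_lock);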
+
+Functions and structures
+========================
+
+.. kernel-doc:: include/linux/maple_tree.h
+.. kernel-doc:: lib/maple_tree.c
diff --git a/Documentation/core-api/mm-api.rst b/Documentation/core-api/mm-api.rst
index 1ebcc6c3fafe..f5dde5bceaea 100644
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -19,9 +19,6 @@ User Space Memory Access
Memory Allocation Controls
==========================
-.. kernel-doc:: include/linux/gfp.h
- :internal:
-
.. kernel-doc:: include/linux/gfp_types.h
:doc: Page mobility and placement hints
diff --git a/Documentation/dev-tools/index.rst b/Documentation/dev-tools/index.rst
index 4621eac290f4..6b0663075dc0 100644
--- a/Documentation/dev-tools/index.rst
+++ b/Documentation/dev-tools/index.rst
@@ -24,6 +24,7 @@ Documentation/dev-tools/testing-overview.rst
kcov
gcov
kasan
+ kmsan
ubsan
kmemleak
kcsan
diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index 1772fd457fed..5c93ab915049 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -111,9 +111,17 @@ parameter can be used to control panic and reporting behaviour:
report or also panic the kernel (default: ``report``). The panic happens even
if ``kasan_multi_shot`` is enabled.
-Hardware Tag-Based KASAN mode (see the section about various modes below) is
-intended for use in production as a security mitigation. Therefore, it supports
-additional boot parameters that allow disabling KASAN or controlling features:
+Software and Hardware Tag-Based KASAN modes (see the section about various
+modes below) support altering stack trace collection behavior:
+
+- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
+ traces collection (default: ``on``).
+- ``kasan.stack_ring_size=<number of entries>`` specifies the number of entries
+ in the stack ring (default: ``32768``).
+
+Hardware Tag-Based KASAN mode is intended for use in production as a security
+mitigation. Therefore, it supports additional boot parameters that allow
+disabling KASAN altogether or controlling its features:
- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
@@ -132,9 +140,6 @@ additional boot parameters that allow disabling KASAN or controlling features:
- ``kasan.vmalloc=off`` or ``=on`` disables or enables tagging of vmalloc
allocations (default: ``on``).
-- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
- traces collection (default: ``on``).
-
Error reports
~~~~~~~~~~~~~
diff --git a/Documentation/dev-tools/kmsan.rst b/Documentation/dev-tools/kmsan.rst
new file mode 100644
index 000000000000..2a53a801198c
--- /dev/null
+++ b/Documentation/dev-tools/kmsan.rst
@@ -0,0 +1,427 @@
+.. SPDX-License-Identifier: GPL-2.0
+.. Copyright (C) 2022, Google LLC.
+
+===================================
+The Kernel Memory Sanitizer (KMSAN)
+===================================
+
+KMSAN is a dynamic error detector aimed at finding uses of uninitialized
+values. It is based on compiler instrumentation, and is quite similar to the
+userspace `MemorySanitizer tool`_.
+
+An important note is that KMSAN is not intended for production use, because it
+drastically increases kernel memory footprint and slows the whole system down.
+
+Usage
+=====
+
+Building the kernel
+-------------------
+
+In order to build a kernel with KMSAN you will need a fresh Clang (14.0.6+).
+Please refer to `LLVM documentation`_ for the instructions on how to build Clang.
+
+Now configure and build the kernel with CONFIG_KMSAN enabled.
+
+Example report
+--------------
+
+Here is an example of a KMSAN report::
+
+ =====================================================
+ BUG: KMSAN: uninit-value in test_uninit_kmsan_check_memory+0x1be/0x380 [kmsan_test]
+ test_uninit_kmsan_check_memory+0x1be/0x380 mm/kmsan/kmsan_test.c:273
+ kunit_run_case_internal lib/kunit/test.c:333
+ kunit_try_run_case+0x206/0x420 lib/kunit/test.c:374
+ kunit_generic_run_threadfn_adapter+0x6d/0xc0 lib/kunit/try-catch.c:28
+ kthread+0x721/0x850 kernel/kthread.c:327
+ ret_from_fork+0x1f/0x30 ??:?
+
+ Uninit was stored to memory at:
+ do_uninit_local_array+0xfa/0x110 mm/kmsan/kmsan_test.c:260
+ test_uninit_kmsan_check_memory+0x1a2/0x380 mm/kmsan/kmsan_test.c:271
+ kunit_run_case_internal lib/kunit/test.c:333
+ kunit_try_run_case+0x206/0x420 lib/kunit/test.c:374
+ kunit_generic_run_threadfn_adapter+0x6d/0xc0 lib/kunit/try-catch.c:28
+ kthread+0x721/0x850 kernel/kthread.c:327
+ ret_from_fork+0x1f/0x30 ??:?
+
+ Local variable uninit created at:
+ do_uninit_local_array+0x4a/0x110 mm/kmsan/kmsan_test.c:256
+ test_uninit_kmsan_check_memory+0x1a2/0x380 mm/kmsan/kmsan_test.c:271
+
+ Bytes 4-7 of 8 are uninitialized
+ Memory access of size 8 starts at ffff888083fe3da0
+
+ CPU: 0 PID: 6731 Comm: kunit_try_catch Tainted: G B E 5.16.0-rc3+ #104
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
+ =====================================================
+
+The report says that the local variable ``uninit`` was created uninitialized in
+``do_uninit_local_array()``. The third stack trace corresponds to the place
+where this variable was created.
+
+The first stack trace shows where the uninit value was used (in
+``test_uninit_kmsan_check_memory()``). The tool shows the bytes which were left
+uninitialized in the local variable, as well as the stack where the value was
+copied to another memory location before use.
+
+A use of uninitialized value ``v`` is reported by KMSAN in the following cases:
+ - in a condition, e.g. ``if (v) { ... }``;
+ - in an indexing or pointer dereferencing, e.g. ``array[v]`` or ``*v``;
+ - when it is copied to userspace or hardware, e.g. ``copy_to_user(..., &v, ...)``;
+ - when it is passed as an argument to a function, and
+ ``CONFIG_KMSAN_CHECK_PARAM_RETVAL`` is enabled (see below).
+
+The mentioned cases (apart from copying data to userspace or hardware, which is
+a security issue) are considered undefined behavior from the C11 Standard point
+of view.
+
+Disabling the instrumentation
+-----------------------------
+
+A function can be marked with ``__no_kmsan_checks``. Doing so makes KMSAN
+ignore uninitialized values in that function and mark its output as initialized.
+As a result, the user will not get KMSAN reports related to that function.
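+
+A tiny illustrative sketch (the helper function itself is made up)::
+
+	/* Treat whatever this helper produces as initialized. */
+	static int __no_kmsan_checks read_hw_status(void __iomem *reg)
+	{
+		return readl(reg);
+	}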
+
+Another function attribute supported by KMSAN is ``__no_sanitize_memory``.
+Applying this attribute to a function will result in KMSAN not instrumenting
+it, which can be helpful if we do not want the compiler to interfere with some
+low-level code (e.g. that marked with ``noinstr`` which implicitly adds
+``__no_sanitize_memory``).
+
+This however comes at a cost: stack allocations from such functions will have
+incorrect shadow/origin values, likely leading to false positives. Functions
+called from non-instrumented code may also receive incorrect metadata for their
+parameters.
+
+As a rule of thumb, avoid using ``__no_sanitize_memory`` explicitly.
+
+It is also possible to disable KMSAN for a single file (e.g. main.o)::
+
+ KMSAN_SANITIZE_main.o := n
+
+or for the whole directory::
+
+ KMSAN_SANITIZE := n
+
+in the Makefile. Think of this as applying ``__no_sanitize_memory`` to every
+function in the file or directory. Most users won't need KMSAN_SANITIZE, unless
+their code gets broken by KMSAN (e.g. runs at early boot time).
+
+Support
+=======
+
+In order for KMSAN to work the kernel must be built with Clang, which so far is
+the only compiler that has KMSAN support. The kernel instrumentation pass is
+based on the userspace `MemorySanitizer tool`_.
+
+The runtime library only supports x86_64 at the moment.
+
+How KMSAN works
+===============
+
+KMSAN shadow memory
+-------------------
+
+KMSAN associates a metadata byte (also called shadow byte) with every byte of
+kernel memory. A bit in the shadow byte is set iff the corresponding bit of the
+kernel memory byte is uninitialized. Marking the memory uninitialized (i.e.
+setting its shadow bytes to ``0xff``) is called poisoning; marking it
+initialized (setting the shadow bytes to ``0x00``) is called unpoisoning.
+
+When a new variable is allocated on the stack, it is poisoned by default by
+instrumentation code inserted by the compiler (unless it is a stack variable
+that is immediately initialized). Any new heap allocation done without
+``__GFP_ZERO`` is also poisoned.
+
+Compiler instrumentation also tracks the shadow values as they are used along
+the code. When needed, instrumentation code invokes the runtime library in
+``mm/kmsan/`` to persist shadow values.
+
+The shadow value of a basic or compound type is an array of bytes of the same
+length. When a constant value is written into memory, that memory is unpoisoned.
+When a value is read from memory, its shadow memory is also obtained and
+propagated into all the operations which use that value. For every instruction
+that takes one or more values, the compiler generates code that calculates the
+shadow of the result depending on those values and their shadows.
+
+Example::
+
+ int a = 0xff; // i.e. 0x000000ff
+ int b;
+ int c = a | b;
+
+In this case the shadow of ``a`` is ``0``, shadow of ``b`` is ``0xffffffff``,
+shadow of ``c`` is ``0xffffff00``. This means that the upper three bytes of
+``c`` are uninitialized, while the lower byte is initialized.
+
+Origin tracking
+---------------
+
+Every four bytes of kernel memory also have a so-called origin mapped to them.
+This origin describes the point in program execution at which the uninitialized
+value was created. Every origin is associated with either the full allocation
+stack (for heap-allocated memory), or the function containing the uninitialized
+variable (for locals).
+
+When an uninitialized variable is allocated on stack or heap, a new origin
+value is created, and that variable's origin is filled with that value. When a
+value is read from memory, its origin is also read and kept together with the
+shadow. For every instruction that takes one or more values, the origin of the
+result is one of the origins corresponding to any of the uninitialized inputs.
+If a poisoned value is written into memory, its origin is written to the
+corresponding storage as well.
+
+Example 1::
+
+ int a = 42;
+ int b;
+ int c = a + b;
+
+In this case the origin of ``b`` is generated upon function entry, and is
+stored to the origin of ``c`` right before the addition result is written into
+memory.
+
+Several variables may share the same origin address, if they are stored in the
+same four-byte chunk. In this case every write to either variable updates the
+origin for all of them. We have to sacrifice precision in this case, because
+storing origins for individual bits (and even bytes) would be too costly.
+
+Example 2::
+
+ int combine(short a, short b) {
+   union ret_t {
+     int i;
+     short s[2];
+   } ret;
+   ret.s[0] = a;
+   ret.s[1] = b;
+   return ret.i;
+ }
+
+If ``a`` is initialized and ``b`` is not, the shadow of the result would be
+``0xffff0000``, and the origin of the result would be the origin of ``b``.
+``ret.s[0]`` would have the same origin, but it will never be used, because
+that variable is initialized.
+
+If both function arguments are uninitialized, only the origin of the second
+argument is preserved.
+
+Origin chaining
+~~~~~~~~~~~~~~~
+
+To ease debugging, KMSAN creates a new origin for every store of an
+uninitialized value to memory. The new origin references both its creation stack
+and the previous origin the value had. This may cause increased memory
+consumption, so we limit the length of origin chains in the runtime.
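+
+A sketch of the kind of copy chain this helps untangle (the names below are
+hypothetical)::
+
+ struct packet { int len; };
+
+ void hypothetical_chain(struct packet *p)
+ {
+   int len;            /* origin A is created for this local */
+
+   p->len = len;       /* the store creates origin B, chained to A */
+   if (p->len)         /* the report shows both B and A */
+     do_work(p);
+ }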
+
+Clang instrumentation API
+-------------------------
+
+The Clang instrumentation pass inserts calls to functions defined in
+``mm/kmsan/instrumentation.c`` into the kernel code.
+
+Shadow manipulation
+~~~~~~~~~~~~~~~~~~~
+
+For every memory access the compiler emits a call to a function that returns a
+pair of pointers to the shadow and origin addresses of the given memory::
+
+ typedef struct {
+   void *shadow, *origin;
+ } shadow_origin_ptr_t
+
+ shadow_origin_ptr_t __msan_metadata_ptr_for_load_{1,2,4,8}(void *addr)
+ shadow_origin_ptr_t __msan_metadata_ptr_for_store_{1,2,4,8}(void *addr)
+ shadow_origin_ptr_t __msan_metadata_ptr_for_load_n(void *addr, uintptr_t size)
+ shadow_origin_ptr_t __msan_metadata_ptr_for_store_n(void *addr, uintptr_t size)
+
+The function name depends on the memory access size.
+
+The compiler makes sure that for every loaded value its shadow and origin
+values are read from memory. When a value is stored to memory, its shadow and
+origin are also stored using the metadata pointers.
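+
+Conceptually, a 4-byte store of ``val`` to ``p`` is thus expanded into
+something like the following (a simplified sketch, not the literal code the
+compiler emits; ``shadow_of_val`` and ``origin_of_val`` are named here only
+for readability)::
+
+ shadow_origin_ptr_t meta = __msan_metadata_ptr_for_store_4(p);
+
+ *(u32 *)meta.shadow = shadow_of_val;    /* propagate the shadow */
+ if (shadow_of_val)
+   *(u32 *)meta.origin = origin_of_val;  /* and the origin, if poisoned */
+ *p = val;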
+
+Handling locals
+~~~~~~~~~~~~~~~
+
+A special function is used to create a new origin value for a local variable and
+set the origin of that variable to that value::
+
+ void __msan_poison_alloca(void *addr, uintptr_t size, char *descr)
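+
+For a local such as ``char buf[8];`` the inserted call is roughly (the exact
+description string is generated by the compiler and is simplified here)::
+
+ __msan_poison_alloca(buf, 8, "buf");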
+
+Access to per-task data
+~~~~~~~~~~~~~~~~~~~~~~~
+
+At the beginning of every instrumented function KMSAN inserts a call to
+``__msan_get_context_state()``::
+
+ kmsan_context_state *__msan_get_context_state(void)
+
+``kmsan_context_state`` is declared in ``include/linux/kmsan.h``::
+
+ struct kmsan_context_state {
+   char param_tls[KMSAN_PARAM_SIZE];
+   char retval_tls[KMSAN_RETVAL_SIZE];
+   char va_arg_tls[KMSAN_PARAM_SIZE];
+   char va_arg_origin_tls[KMSAN_PARAM_SIZE];
+   u64 va_arg_overflow_size_tls;
+   char param_origin_tls[KMSAN_PARAM_SIZE];
+   depot_stack_handle_t retval_origin_tls;
+ };
+
+This structure is used by KMSAN to pass parameter shadows and origins between
+instrumented functions (unless the parameters are checked immediately by
+``CONFIG_KMSAN_CHECK_PARAM_RETVAL``).
+
+Passing uninitialized values to functions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Clang's MemorySanitizer instrumentation has an option,
+``-fsanitize-memory-param-retval``, which makes the compiler check function
+parameters passed by value, as well as function return values.
+
+The option is controlled by ``CONFIG_KMSAN_CHECK_PARAM_RETVAL``, which is
+enabled by default to let KMSAN report uninitialized values earlier.
+Please refer to the `LKML discussion`_ for more details.
+
+Because of the way the checks are implemented in LLVM (they are only applied to
+parameters marked as ``noundef``), not all parameters are guaranteed to be
+checked, so we cannot give up the metadata storage in ``kmsan_context_state``.
+
+String functions
+~~~~~~~~~~~~~~~~
+
+The compiler replaces calls to ``memcpy()``/``memmove()``/``memset()`` with the
+following functions. These functions are also called when data structures are
+initialized or copied, making sure shadow and origin values are copied along
+with the data::
+
+ void *__msan_memcpy(void *dst, void *src, uintptr_t n)
+ void *__msan_memmove(void *dst, void *src, uintptr_t n)
+ void *__msan_memset(void *dst, int c, uintptr_t n)
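+
+For example, an explicit ``memcpy()`` call in instrumented code is emitted as
+the KMSAN variant, so the metadata travels with the bytes (``dst``, ``src``
+and ``len`` below are placeholders)::
+
+ memcpy(dst, src, len);   /* compiled as __msan_memcpy(dst, src, len) */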
+
+Error reporting
+~~~~~~~~~~~~~~~
+
+For every use of a value, the compiler emits a shadow check that calls
+``__msan_warning()`` if that value is poisoned::
+
+ void __msan_warning(u32 origin)
+
+``__msan_warning()`` causes the KMSAN runtime to print an error report.
+
+Inline assembly instrumentation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+KMSAN instruments every inline assembly output with a call to::
+
+ void __msan_instrument_asm_store(void *addr, uintptr_t size)
+
+This call unpoisons the memory region.
+
+This approach may mask certain errors, but it also helps to avoid a lot of
+false positives in bitwise operations, atomics etc.
+
+Sometimes the pointers passed into inline assembly do not point to valid memory.
+In such cases they are ignored at runtime.
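+
+For example, an inline assembly statement with a single memory output gets
+that output unpoisoned right after the statement (a sketch; the exact
+instrumentation depends on the output constraints)::
+
+ u64 tsc;
+
+ asm volatile("rdtsc; shl $32, %%rdx; or %%rdx, %%rax; mov %%rax, %0"
+       : "=m"(tsc) : : "rax", "rdx");
+ /* KMSAN inserts roughly: __msan_instrument_asm_store(&tsc, 8); */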
+
+
+Runtime library
+---------------
+
+The code is located in ``mm/kmsan/``.
+
+Per-task KMSAN state
+~~~~~~~~~~~~~~~~~~~~
+
+Every ``task_struct`` has an associated KMSAN task state that holds the KMSAN
+context (see above) and a per-task flag disallowing KMSAN reports::
+
+ struct kmsan_ctx {
+   ...
+   bool allow_reporting;
+   struct kmsan_context_state cstate;
+   ...
+ };
+
+ struct task_struct {
+   ...
+   struct kmsan_ctx kmsan_ctx;
+   ...
+ };
+
+KMSAN contexts
+~~~~~~~~~~~~~~
+
+When running in a kernel task context, KMSAN uses ``current->kmsan_ctx.cstate``
+to hold the metadata for function parameters and return values.
+
+When the kernel is running in an interrupt, softirq or NMI context, where
+``current`` is unavailable, KMSAN switches to the per-CPU interrupt state::
+
+ DEFINE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);
+
+Metadata allocation
+~~~~~~~~~~~~~~~~~~~
+
+Depending on the kind of memory, KMSAN metadata is stored in several places.
+
+1. Each ``struct page`` instance contains two pointers to its shadow and
+origin pages::
+
+ struct page {
+   ...
+   struct page *shadow, *origin;
+   ...
+ };
+
+At boot-time, the kernel allocates shadow and origin pages for every available
+kernel page. This is done quite late, when the kernel address space is already
+fragmented, so normal data pages may arbitrarily interleave with the metadata
+pages.
+
+This means that in general for two contiguous memory pages their shadow/origin
+pages may not be contiguous. Consequently, if a memory access crosses the
+boundary of a memory block, accesses to shadow/origin memory may potentially
+corrupt other pages or read incorrect values from them.
+
+In practice, contiguous memory pages returned by the same ``alloc_pages()``
+call will have contiguous metadata, whereas if these pages belong to two
+different allocations their metadata pages can be fragmented.
+
+For the kernel data (``.data``, ``.bss`` etc.) and percpu memory regions
+there are also no guarantees on metadata contiguity.
+
+If ``__msan_metadata_ptr_for_XXX_YYY()`` hits the boundary between two pages
+with non-contiguous metadata, it returns pointers to fake shadow/origin regions::
+
+ char dummy_load_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+ char dummy_store_page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
+
+``dummy_load_page`` is zero-initialized, so reads from it always yield zeroes.
+All stores to ``dummy_store_page`` are ignored.
+
+2. For vmalloc memory and modules, there is a direct mapping between the memory
+range, its shadow and origin. KMSAN reduces the vmalloc area by 3/4, making only
+the first quarter available to ``vmalloc()``. The second quarter of the vmalloc
+area contains shadow memory for the first quarter, the third one holds the
+origins. A small part of the fourth quarter contains shadow and origins for the
+kernel modules. Please refer to ``arch/x86/include/asm/pgtable_64_types.h`` for
+more details.
+
+When an array of pages is mapped into a contiguous virtual memory space, their
+shadow and origin pages are similarly mapped into contiguous regions.
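+
+Under the x86-64 layout introduced by this series (see the
+``pgtable_64_types.h`` hunk below), this direct mapping boils down to adding a
+constant offset; roughly (``vmalloc_addr`` stands for any address inside the
+vmalloc area)::
+
+ void *shadow = (void *)((unsigned long)vmalloc_addr + KMSAN_VMALLOC_SHADOW_OFFSET);
+ void *origin = (void *)((unsigned long)vmalloc_addr + KMSAN_VMALLOC_ORIGIN_OFFSET);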
+
+References
+==========
+
+E. Stepanov, K. Serebryany. `MemorySanitizer: fast detector of uninitialized
+memory use in C++
+<https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43308.pdf>`_.
+In Proceedings of CGO 2015.
+
+.. _MemorySanitizer tool: https://clang.llvm.org/docs/MemorySanitizer.html
+.. _LLVM documentation: https://llvm.org/docs/GettingStarted.html
+.. _LKML discussion: https://lore.kernel.org/all/20220614144853.3693273-1-glider@google.com/
diff --git a/Documentation/mm/index.rst b/Documentation/mm/index.rst
index 575ccd40e30c..4aa12b8be278 100644
--- a/Documentation/mm/index.rst
+++ b/Documentation/mm/index.rst
@@ -51,6 +51,7 @@ above structured documentation, or deleted if it has served its purpose.
ksm
memory-model
mmu_notifier
+ multigen_lru
numa
overcommit-accounting
page_migration
diff --git a/Documentation/mm/ksm.rst b/Documentation/mm/ksm.rst
index 9e37add068e6..f83cfbc12f4c 100644
--- a/Documentation/mm/ksm.rst
+++ b/Documentation/mm/ksm.rst
@@ -26,7 +26,7 @@ tree.
If a KSM page is shared between less than ``max_page_sharing`` VMAs,
the node of the stable tree that represents such KSM page points to a
-list of struct rmap_item and the ``page->mapping`` of the
+list of struct ksm_rmap_item and the ``page->mapping`` of the
KSM page points to the stable tree node.
When the sharing passes this threshold, KSM adds a second dimension to
diff --git a/Documentation/mm/multigen_lru.rst b/Documentation/mm/multigen_lru.rst
new file mode 100644
index 000000000000..d7062c6a8946
--- /dev/null
+++ b/Documentation/mm/multigen_lru.rst
@@ -0,0 +1,159 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+Multi-Gen LRU
+=============
+The multi-gen LRU is an alternative LRU implementation that optimizes
+page reclaim and improves performance under memory pressure. Page
+reclaim decides the kernel's caching policy and ability to overcommit
+memory. It directly impacts the kswapd CPU usage and RAM efficiency.
+
+Design overview
+===============
+Objectives
+----------
+The design objectives are:
+
+* Good representation of access recency
+* Try to profit from spatial locality
+* Fast paths to make obvious choices
+* Simple self-correcting heuristics
+
+The representation of access recency is at the core of all LRU
+implementations. In the multi-gen LRU, each generation represents a
+group of pages with similar access recency. Generations establish a
+(time-based) common frame of reference and therefore help make better
+choices, e.g., between different memcgs on a computer or different
+computers in a data center (for job scheduling).
+
+Exploiting spatial locality improves efficiency when gathering the
+accessed bit. An rmap walk targets a single page and does not try to
+profit from discovering a young PTE. A page table walk can sweep all
+the young PTEs in an address space, but the address space can be too
+sparse to make a profit. The key is to optimize both methods and use
+them in combination.
+
+Fast paths reduce code complexity and runtime overhead. Unmapped pages
+do not require TLB flushes; clean pages do not require writeback.
+These facts are only helpful when other conditions, e.g., access
+recency, are similar. With generations as a common frame of reference,
+additional factors stand out. But obvious choices might not be good
+choices; thus self-correction is necessary.
+
+The benefits of simple self-correcting heuristics are self-evident.
+Again, with generations as a common frame of reference, this becomes
+attainable. Specifically, pages in the same generation can be
+categorized based on additional factors, and a feedback loop can
+statistically compare the refault percentages across those categories
+and infer which of them are better choices.
+
+Assumptions
+-----------
+The protection of hot pages and the selection of cold pages are based
+on page access channels and patterns. There are two access channels:
+
+* Accesses through page tables
+* Accesses through file descriptors
+
+The protection of the former channel is by design stronger because:
+
+1. The uncertainty in determining the access patterns of the former
+ channel is higher due to the approximation of the accessed bit.
+2. The cost of evicting the former channel is higher due to the TLB
+ flushes required and the likelihood of encountering the dirty bit.
+3. The penalty of underprotecting the former channel is higher because
+ applications usually do not prepare themselves for major page
+ faults like they do for blocked I/O. E.g., GUI applications
+ commonly use dedicated I/O threads to avoid blocking rendering
+ threads.
+
+There are also two access patterns:
+
+* Accesses exhibiting temporal locality
+* Accesses not exhibiting temporal locality
+
+For the reasons listed above, the former channel is assumed to follow
+the former pattern unless ``VM_SEQ_READ`` or ``VM_RAND_READ`` is
+present, and the latter channel is assumed to follow the latter
+pattern unless outlying refaults have been observed.
+
+Workflow overview
+=================
+Evictable pages are divided into multiple generations for each
+``lruvec``. The youngest generation number is stored in
+``lrugen->max_seq`` for both anon and file types as they are aged on
+an equal footing. The oldest generation numbers are stored in
+``lrugen->min_seq[]`` separately for anon and file types as clean file
+pages can be evicted regardless of swap constraints. These three
+variables are monotonically increasing.
+
+Generation numbers are truncated into ``order_base_2(MAX_NR_GENS+1)``
+bits in order to fit into the gen counter in ``folio->flags``. Each
+truncated generation number is an index to ``lrugen->lists[]``. The
+sliding window technique is used to track at least ``MIN_NR_GENS`` and
+at most ``MAX_NR_GENS`` generations. The gen counter stores a value
+within ``[1, MAX_NR_GENS]`` while a page is on one of
+``lrugen->lists[]``; otherwise it stores zero.
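+
+As a sketch of the arithmetic (``gen``, ``counter`` and ``seq`` below are
+illustrative variables, not kernel identifiers)::
+
+ /* index into lrugen->lists[] for a given sequence number seq */
+ gen = seq % MAX_NR_GENS;
+
+ /* value kept in the gen counter while the page is on a list */
+ counter = gen + 1;              /* always within [1, MAX_NR_GENS] */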
+
+Each generation is divided into multiple tiers. A page accessed ``N``
+times through file descriptors is in tier ``order_base_2(N)``. Unlike
+generations, tiers do not have dedicated ``lrugen->lists[]``. In
+contrast to moving across generations, which requires the LRU lock,
+moving across tiers only involves atomic operations on
+``folio->flags`` and therefore has a negligible cost. A feedback loop
+modeled after the PID controller monitors refaults over all the tiers
+from anon and file types and decides which tiers from which types to
+evict or protect.
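+
+As a sketch of the tier arithmetic (``tier`` and ``n`` below are illustrative
+variables, not kernel identifiers)::
+
+ /* a page accessed n times through file descriptors sits in this tier */
+ tier = order_base_2(n);         /* n=1 -> tier 0, n=2 -> tier 1, n=4 -> tier 2 */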
+
+There are two conceptually independent procedures: the aging and the
+eviction. They form a closed-loop system, i.e., the page reclaim.
+
+Aging
+-----
+The aging produces young generations. Given an ``lruvec``, it
+increments ``max_seq`` when ``max_seq-min_seq+1`` approaches
+``MIN_NR_GENS``. The aging promotes hot pages to the youngest
+generation when it finds them accessed through page tables; the
+demotion of cold pages happens consequently when it increments
+``max_seq``. The aging uses page table walks and rmap walks to find
+young PTEs. For the former, it iterates ``lruvec_memcg()->mm_list``
+and calls ``walk_page_range()`` with each ``mm_struct`` on this list
+to scan PTEs, and after each iteration, it increments ``max_seq``. For
+the latter, when the eviction walks the rmap and finds a young PTE,
+the aging scans the adjacent PTEs. For both, on finding a young PTE,
+the aging clears the accessed bit and updates the gen counter of the
+page mapped by this PTE to ``(max_seq%MAX_NR_GENS)+1``.
+
+Eviction
+--------
+The eviction consumes old generations. Given an ``lruvec``, it
+increments ``min_seq`` when ``lrugen->lists[]`` indexed by
+``min_seq%MAX_NR_GENS`` becomes empty. To select a type and a tier to
+evict from, it first compares ``min_seq[]`` to select the older type.
+If both types are equally old, it selects the one whose first tier has
+a lower refault percentage. The first tier contains single-use
+unmapped clean pages, which are the best bet. The eviction sorts a
+page according to its gen counter if the aging has found this page
+accessed through page tables and updated its gen counter. It also
+moves a page to the next generation, i.e., ``min_seq+1``, if this page
+was accessed multiple times through file descriptors and the feedback
+loop has detected outlying refaults from the tier this page is in. To
+this end, the feedback loop uses the first tier as the baseline, for
+the reason stated earlier.
+
+Summary
+-------
+The multi-gen LRU can be disassembled into the following parts:
+
+* Generations
+* Rmap walks
+* Page table walks
+* Bloom filters
+* PID controller
+
+The aging and the eviction form a producer-consumer model;
+specifically, the latter drives the former by the sliding window over
+generations. Within the aging, rmap walks drive page table walks by
+inserting hot, densely populated page tables into the Bloom filters.
+Within the eviction, the PID controller uses refaults as the feedback
+to select types to evict and tiers to protect.
diff --git a/Documentation/mm/page_owner.rst b/Documentation/mm/page_owner.rst
index f5c954afe97c..f18fd8907049 100644
--- a/Documentation/mm/page_owner.rst
+++ b/Documentation/mm/page_owner.rst
@@ -94,6 +94,11 @@ Usage
Page allocated via order XXX, ...
PFN XXX ...
// Detailed stack
+ By default, it will do a full pfn dump. To start with a given pfn,
+ page_owner supports fseek::
+
+ FILE *fp = fopen("/sys/kernel/debug/page_owner", "r");
+ fseek(fp, pfn_start, SEEK_SET);
The ``page_owner_sort`` tool ignores ``PFN`` rows, puts the remaining rows
in buf, uses regexp to extract the page order value, counts the times
diff --git a/MAINTAINERS b/MAINTAINERS
index 7547ffce5032..0b278ec54122 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11004,7 +11004,6 @@ F: arch/*/include/asm/*kasan.h
F: arch/*/mm/kasan_init*
F: include/linux/kasan*.h
F: lib/Kconfig.kasan
-F: lib/test_kasan*.c
F: mm/kasan/
F: scripts/Makefile.kasan
@@ -11438,6 +11437,20 @@ F: kernel/kmod.c
F: lib/test_kmod.c
F: tools/testing/selftests/kmod/
+KMSAN
+M: Alexander Potapenko <glider@google.com>
+R: Marco Elver <elver@google.com>
+R: Dmitry Vyukov <dvyukov@google.com>
+L: kasan-dev@googlegroups.com
+S: Maintained
+F: Documentation/dev-tools/kmsan.rst
+F: arch/*/include/asm/kmsan.h
+F: arch/*/mm/kmsan_*
+F: include/linux/kmsan*.h
+F: lib/Kconfig.kmsan
+F: mm/kmsan/
+F: scripts/Makefile.kmsan
+
KPROBES
M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
@@ -12168,6 +12181,18 @@ L: linux-man@vger.kernel.org
S: Maintained
W: http://www.kernel.org/doc/man-pages
+MAPLE TREE
+M: Liam R. Howlett <Liam.Howlett@oracle.com>
+L: linux-mm@kvack.org
+S: Supported
+F: Documentation/core-api/maple_tree.rst
+F: include/linux/maple_tree.h
+F: include/trace/events/maple_tree.h
+F: lib/maple_tree.c
+F: lib/test_maple_tree.c
+F: tools/testing/radix-tree/linux/maple_tree.h
+F: tools/testing/radix-tree/maple.c
+
MARDUK (CREATOR CI40) DEVICE TREE SUPPORT
M: Rahul Bedarkar <rahulbedarkar89@gmail.com>
L: linux-mips@vger.kernel.org
diff --git a/Makefile b/Makefile
index 85a63a1d29b3..cfbe6a7de640 100644
--- a/Makefile
+++ b/Makefile
@@ -1081,6 +1081,7 @@ include-y := scripts/Makefile.extrawarn
include-$(CONFIG_DEBUG_INFO) += scripts/Makefile.debug
include-$(CONFIG_KASAN) += scripts/Makefile.kasan
include-$(CONFIG_KCSAN) += scripts/Makefile.kcsan
+include-$(CONFIG_KMSAN) += scripts/Makefile.kmsan
include-$(CONFIG_UBSAN) += scripts/Makefile.ubsan
include-$(CONFIG_KCOV) += scripts/Makefile.kcov
include-$(CONFIG_RANDSTRUCT) += scripts/Makefile.randstruct
diff --git a/arch/Kconfig b/arch/Kconfig
index 266862428a84..8f138e580d1a 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -1416,6 +1416,14 @@ config DYNAMIC_SIGFRAME
config HAVE_ARCH_NODE_DEV_GROUP
bool
+config ARCH_HAS_NONLEAF_PMD_YOUNG
+ bool
+ help
+ Architectures that select this option are capable of setting the
+ accessed bit in non-leaf PMD entries when using them as part of linear
+ address translations. Page table walkers that clear the accessed bit
+ may use this capability to reduce their search space.
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index 4aa996423b0d..763929e814e9 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -76,6 +76,8 @@
#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
+#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 9e3653253ef2..d9a13ccf89a3 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -554,7 +554,7 @@ config ARC_BUILTIN_DTB_NAME
endmenu # "ARC Architecture Configuration"
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
default "12" if ARC_HUGEPAGE_16M
default "11"
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 68923a69b1d4..a08c9d092a33 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1362,7 +1362,7 @@ config ARM_MODULE_PLTS
Disabling this is usually safe for small single-platform
configurations. If unsure, say y.
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
default "12" if SOC_AM33XX
default "9" if SA1111
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 6429c4106ab5..078d61b758a9 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -31,7 +31,7 @@ CONFIG_SOC_VF610=y
CONFIG_SMP=y
CONFIG_ARM_PSCI=y
CONFIG_HIGHMEM=y
-CONFIG_FORCE_MAX_ZONEORDER=14
+CONFIG_ARCH_FORCE_MAX_ORDER=14
CONFIG_CMDLINE="noinitrd console=ttymxc0,115200"
CONFIG_KEXEC=y
CONFIG_CPU_FREQ=y
diff --git a/arch/arm/configs/milbeaut_m10v_defconfig b/arch/arm/configs/milbeaut_m10v_defconfig
index cdb505c74654..a2e25bf843cc 100644
--- a/arch/arm/configs/milbeaut_m10v_defconfig
+++ b/arch/arm/configs/milbeaut_m10v_defconfig
@@ -26,7 +26,7 @@ CONFIG_THUMB2_KERNEL=y
# CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11 is not set
# CONFIG_ARM_PATCH_IDIV is not set
CONFIG_HIGHMEM=y
-CONFIG_FORCE_MAX_ZONEORDER=12
+CONFIG_ARCH_FORCE_MAX_ORDER=12
CONFIG_SECCOMP=y
CONFIG_KEXEC=y
CONFIG_EFI=y
diff --git a/arch/arm/configs/oxnas_v6_defconfig b/arch/arm/configs/oxnas_v6_defconfig
index d206c4f04490..70a67b3fc91b 100644
--- a/arch/arm/configs/oxnas_v6_defconfig
+++ b/arch/arm/configs/oxnas_v6_defconfig
@@ -12,7 +12,7 @@ CONFIG_ARCH_OXNAS=y
CONFIG_MACH_OX820=y
CONFIG_SMP=y
CONFIG_NR_CPUS=16
-CONFIG_FORCE_MAX_ZONEORDER=12
+CONFIG_ARCH_FORCE_MAX_ORDER=12
CONFIG_SECCOMP=y
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 2845fae4f3cc..d60cc9cc4c21 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -21,7 +21,7 @@ CONFIG_MACH_AKITA=y
CONFIG_MACH_BORZOI=y
CONFIG_PXA_SYSTEMS_CPLDS=y
CONFIG_AEABI=y
-CONFIG_FORCE_MAX_ZONEORDER=9
+CONFIG_ARCH_FORCE_MAX_ORDER=9
CONFIG_CMDLINE="root=/dev/ram0 ro"
CONFIG_KEXEC=y
CONFIG_CPU_FREQ=y
diff --git a/arch/arm/configs/sama7_defconfig b/arch/arm/configs/sama7_defconfig
index 72af50d9e48a..8f28c9d443f0 100644
--- a/arch/arm/configs/sama7_defconfig
+++ b/arch/arm/configs/sama7_defconfig
@@ -19,7 +19,7 @@ CONFIG_ATMEL_CLOCKSOURCE_TCB=y
# CONFIG_CACHE_L2X0 is not set
# CONFIG_ARM_PATCH_IDIV is not set
# CONFIG_CPU_SW_DOMAIN_PAN is not set
-CONFIG_FORCE_MAX_ZONEORDER=15
+CONFIG_ARCH_FORCE_MAX_ORDER=15
CONFIG_UACCESS_WITH_MEMCPY=y
# CONFIG_ATAGS is not set
CONFIG_CMDLINE="console=ttyS0,115200 earlyprintk ignore_loglevel"
diff --git a/arch/arm/configs/sp7021_defconfig b/arch/arm/configs/sp7021_defconfig
index aa7dfd670db5..5bca2eb59b86 100644
--- a/arch/arm/configs/sp7021_defconfig
+++ b/arch/arm/configs/sp7021_defconfig
@@ -17,7 +17,7 @@ CONFIG_ARCH_SUNPLUS=y
# CONFIG_VDSO is not set
CONFIG_SMP=y
CONFIG_THUMB2_KERNEL=y
-CONFIG_FORCE_MAX_ZONEORDER=12
+CONFIG_ARCH_FORCE_MAX_ORDER=12
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_MODULES=y
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 12e0a162ece5..f6737d2f37b2 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1431,7 +1431,7 @@ config XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int
default "14" if ARM64_64K_PAGES
default "12" if ARM64_16K_PAGES
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b5df82aa99e6..71a1af42f0e8 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1082,24 +1082,13 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
* page after fork() + CoW for pfn mappings. We don't always have a
* hardware-managed access flag on arm64.
*/
-static inline bool arch_faults_on_old_pte(void)
-{
- /* The register read below requires a stable CPU to make any sense */
- cant_migrate();
-
- return !cpu_has_hw_af();
-}
-#define arch_faults_on_old_pte arch_faults_on_old_pte
+#define arch_has_hw_pte_young cpu_has_hw_af
/*
* Experimentally, it's cheap to set the access flag in hardware and we
* benefit from prefaulting mappings as 'old' to start with.
*/
-static inline bool arch_wants_old_prefaulted_pte(void)
-{
- return !arch_faults_on_old_pte();
-}
-#define arch_wants_old_prefaulted_pte arch_wants_old_prefaulted_pte
+#define arch_wants_old_prefaulted_pte cpu_has_hw_af
static inline bool pud_sect_supported(void)
{
diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c
index 98d67444a5b6..27ef7ad3ffd2 100644
--- a/arch/arm64/kernel/elfcore.c
+++ b/arch/arm64/kernel/elfcore.c
@@ -8,9 +8,9 @@
#include <asm/cpufeature.h>
#include <asm/mte.h>
-#define for_each_mte_vma(tsk, vma) \
+#define for_each_mte_vma(vmi, vma) \
if (system_supports_mte()) \
- for (vma = tsk->mm->mmap; vma; vma = vma->vm_next) \
+ for_each_vma(vmi, vma) \
if (vma->vm_flags & VM_MTE)
static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
@@ -81,8 +81,9 @@ Elf_Half elf_core_extra_phdrs(void)
{
struct vm_area_struct *vma;
int vma_count = 0;
+ VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(current, vma)
+ for_each_mte_vma(vmi, vma)
vma_count++;
return vma_count;
@@ -91,8 +92,9 @@ Elf_Half elf_core_extra_phdrs(void)
int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
{
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(current, vma) {
+ for_each_mte_vma(vmi, vma) {
struct elf_phdr phdr;
phdr.p_type = PT_AARCH64_MEMTAG_MTE;
@@ -116,8 +118,9 @@ size_t elf_core_extra_data_size(void)
{
struct vm_area_struct *vma;
size_t data_size = 0;
+ VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(current, vma)
+ for_each_mte_vma(vmi, vma)
data_size += mte_vma_tag_dump_size(vma);
return data_size;
@@ -126,8 +129,9 @@ size_t elf_core_extra_data_size(void)
int elf_core_write_extra_data(struct coredump_params *cprm)
{
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, current->mm, 0);
- for_each_mte_vma(current, vma) {
+ for_each_mte_vma(vmi, vma) {
if (vma->vm_flags & VM_DONTDUMP)
continue;
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ac93a2ee9c07..99ae81ab91a7 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -133,10 +133,11 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
unsigned long size = vma->vm_end - vma->vm_start;
if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 0795028f017c..35e9a468d13e 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -245,7 +245,7 @@ static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
{
VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));
- return page_folio(pfn_to_page(swp_offset(entry)));
+ return page_folio(pfn_to_page(swp_offset_pfn(entry)));
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 3cbc2dc62baf..adee6ab36862 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -332,7 +332,7 @@ config HIGHMEM
select KMAP_LOCAL
default y
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
default "11"
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 26ac8ea15a9e..c6e06cdc738f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -200,7 +200,7 @@ config IA64_CYCLONE
Say Y here to enable support for IBM EXA Cyclone time source.
If you're unsure, answer N.
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "MAX_ORDER (11 - 17)" if !HUGETLB_PAGE
range 11 17 if !HUGETLB_PAGE
default "17" if HUGETLB_PAGE
diff --git a/arch/ia64/include/asm/sparsemem.h b/arch/ia64/include/asm/sparsemem.h
index 42ed5248fae9..84e8ce387b69 100644
--- a/arch/ia64/include/asm/sparsemem.h
+++ b/arch/ia64/include/asm/sparsemem.h
@@ -11,10 +11,10 @@
#define SECTION_SIZE_BITS (30)
#define MAX_PHYSMEM_BITS (50)
-#ifdef CONFIG_FORCE_MAX_ZONEORDER
-#if ((CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS)
+#ifdef CONFIG_ARCH_FORCE_MAX_ORDER
+#if ((CONFIG_ARCH_FORCE_MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS)
#undef SECTION_SIZE_BITS
-#define SECTION_SIZE_BITS (CONFIG_FORCE_MAX_ZONEORDER - 1 + PAGE_SHIFT)
+#define SECTION_SIZE_BITS (CONFIG_ARCH_FORCE_MAX_ORDER - 1 + PAGE_SHIFT)
#endif
#endif
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index cfd976065a0d..e83789b34861 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -377,7 +377,7 @@ config NODES_SHIFT
default "6"
depends on NUMA
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
range 14 64 if PAGE_SIZE_64KB
default "14" if PAGE_SIZE_64KB
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index b0504b13b089..9380f6e3bb66 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -397,7 +397,7 @@ config SINGLE_MEMORY_CHUNK
order" to save memory that could be wasted for unused memory map.
Say N if not sure.
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order" if ADVANCED
depends on !SINGLE_MEMORY_CHUNK
default "11"
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 25dd4c5a8ef5..b26b77673c2c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2140,7 +2140,7 @@ config PAGE_SIZE_64KB
endchoice
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
range 14 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
default "14" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
index a8b62df3c021..af070be1b583 100644
--- a/arch/mips/configs/db1xxx_defconfig
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -9,7 +9,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
index 714169e411cf..48e4e251779b 100644
--- a/arch/mips/configs/generic_defconfig
+++ b/arch/mips/configs/generic_defconfig
@@ -3,7 +3,6 @@ CONFIG_NO_HZ_IDLE=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 1be428663c10..c6e1fc77c996 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -103,6 +103,8 @@
#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
+#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 4167f1eb4cd8..a582f72104f3 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -44,7 +44,7 @@ menu "Kernel features"
source "kernel/Kconfig.hz"
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
range 9 20
default "11"
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index a7ea3204a5fa..22133a6a506e 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -70,6 +70,8 @@
#define MADV_WIPEONFORK 71 /* Zero memory on fork, child only */
#define MADV_KEEPONFORK 72 /* Undo MADV_WIPEONFORK */
+#define MADV_COLLAPSE 73 /* Synchronous hugepage collapse */
+
#define MADV_HWPOISON 100 /* poison a page for testing */
#define MADV_SOFT_OFFLINE 101 /* soft offline page for testing */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 3feb7694e0ca..1d3b8bc8a623 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -657,15 +657,20 @@ static inline unsigned long mm_total_size(struct mm_struct *mm)
{
struct vm_area_struct *vma;
unsigned long usize = 0;
+ VMA_ITERATOR(vmi, mm, 0);
- for (vma = mm->mmap; vma && usize < parisc_cache_flush_threshold; vma = vma->vm_next)
+ for_each_vma(vmi, vma) {
+ if (usize >= parisc_cache_flush_threshold)
+ break;
usize += vma->vm_end - vma->vm_start;
+ }
return usize;
}
void flush_cache_mm(struct mm_struct *mm)
{
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
/*
* Flushing the whole cache on each cpu takes forever on
@@ -685,7 +690,7 @@ void flush_cache_mm(struct mm_struct *mm)
}
/* Flush mm */
- for (vma = mm->mmap; vma; vma = vma->vm_next)
+ for_each_vma(vmi, vma)
flush_cache_pages(vma, vma->vm_start, vma->vm_end);
}
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 81c9f895d690..699df27b0e2f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -846,7 +846,7 @@ config DATA_SHIFT
in that case. If PIN_TLB is selected, it must be aligned to 8M as
8M pages will be pinned.
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
range 8 9 if PPC64 && PPC_64K_PAGES
default "9" if PPC64 && PPC_64K_PAGES
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index e6d878a44d33..ea719898b581 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -30,7 +30,7 @@ CONFIG_PREEMPT=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
-CONFIG_FORCE_MAX_ZONEORDER=17
+CONFIG_ARCH_FORCE_MAX_ORDER=17
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_MSI=y
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index f14c6dbd7346..ab8a8c4530d9 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -41,7 +41,7 @@ CONFIG_FIXED_PHY=y
CONFIG_FONT_8x16=y
CONFIG_FONT_8x8=y
CONFIG_FONTS=y
-CONFIG_FORCE_MAX_ZONEORDER=13
+CONFIG_ARCH_FORCE_MAX_ORDER=13
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAME_WARN=1024
CONFIG_FTL=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 43e38f0fa5a7..c92652575064 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -17,7 +17,6 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
CONFIG_NUMA_BALANCING=y
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index a25cf2ca5c7a..7497e17ea657 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -18,7 +18,6 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
CONFIG_NUMA_BALANCING=y
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index e1f36fd61db3..4abc01949702 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -115,18 +115,18 @@ struct vdso_data *arch_get_vdso_data(void *vvar_page)
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
struct mm_struct *mm = task->mm;
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
mmap_read_lock(mm);
-
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
unsigned long size = vma->vm_end - vma->vm_start;
if (vma_is_special_mapping(vma, &vvar_spec))
zap_page_range(vma, vma->vm_start, size);
}
-
mmap_read_unlock(mm);
+
return 0;
}
diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
index 19f0ef950d77..9ad6b56bfec9 100644
--- a/arch/powerpc/mm/book3s32/tlb.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -81,14 +81,15 @@ EXPORT_SYMBOL(hash__flush_range);
void hash__flush_tlb_mm(struct mm_struct *mm)
{
struct vm_area_struct *mp;
+ VMA_ITERATOR(vmi, mm, 0);
/*
- * It is safe to go down the mm's list of vmas when called
- * from dup_mmap, holding mmap_lock. It would also be safe from
- * unmap_region or exit_mmap, but not from vmtruncate on SMP -
- * but it seems dup_mmap is the only SMP case which gets here.
+ * It is safe to iterate the vmas when called from dup_mmap,
+ * holding mmap_lock. It would also be safe from unmap_region
+ * or exit_mmap, but not from vmtruncate on SMP - but it seems
+ * dup_mmap is the only SMP case which gets here.
*/
- for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
+ for_each_vma(vmi, mp)
hash__flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
}
EXPORT_SYMBOL(hash__flush_tlb_mm);
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index 60c6ea16a972..d73b3b4176e8 100644
--- a/arch/powerpc/mm/book3s64/subpage_prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -149,24 +149,15 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
unsigned long len)
{
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, addr);
/*
* We don't try too hard, we just mark all the vma in that range
* VM_NOHUGEPAGE and split them.
*/
- vma = find_vma(mm, addr);
- /*
- * If the range is in unmapped range, just return
- */
- if (vma && ((addr + len) <= vma->vm_start))
- return;
-
- while (vma) {
- if (vma->vm_start >= (addr + len))
- break;
+ for_each_vma_range(vmi, vma, addr + len) {
vma->vm_flags |= VM_NOHUGEPAGE;
walk_page_vma(vma, &subpage_walk_ops, NULL);
- vma = vma->vm_next;
}
}
#else
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 69b05b6c181b..692e7ae3dcb8 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -114,11 +114,12 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
struct __vdso_info *vdso_info = mm->context.vdso_info;
mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
unsigned long size = vma->vm_end - vma->vm_start;
if (vma_is_special_mapping(vma, vdso_info->dm))
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 5075cde77b29..535099f2736d 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -69,10 +69,11 @@ static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
struct mm_struct *mm = task->mm;
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
unsigned long size = vma->vm_end - vma->vm_start;
if (!vma_is_special_mapping(vma, &vvar_mapping))
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index d7b3b193d108..58033dfcb6d4 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -81,8 +81,9 @@ unsigned long _copy_from_user_key(void *to, const void __user *from,
might_fault();
if (!should_fail_usercopy()) {
- instrument_copy_from_user(to, from, n);
+ instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user_key(to, from, n, key);
+ instrument_copy_from_user_after(to, from, n, res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 62758cb5872f..02d15c8dc92e 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2515,8 +2515,9 @@ static const struct mm_walk_ops thp_split_walk_ops = {
static inline void thp_split_mm(struct mm_struct *mm)
{
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
- for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
vma->vm_flags &= ~VM_HUGEPAGE;
vma->vm_flags |= VM_NOHUGEPAGE;
walk_page_vma(vma, &thp_split_walk_ops, NULL);
@@ -2584,8 +2585,9 @@ int gmap_mark_unmergeable(void)
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int ret;
+ VMA_ITERATOR(vmi, mm, 0);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
MADV_UNMERGEABLE, &vma->vm_flags);
if (ret)
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 10e51ef9c79a..c299a18273ff 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -237,16 +237,6 @@ int pud_huge(pud_t pud)
return pud_large(pud);
}
-struct page *
-follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int flags)
-{
- if (flags & FOLL_GET)
- return NULL;
-
- return pud_page(*pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
-}
-
bool __init arch_hugetlb_valid_size(unsigned long size)
{
if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)
diff --git a/arch/sh/configs/ecovec24_defconfig b/arch/sh/configs/ecovec24_defconfig
index e699e2e04128..b52e14ccb450 100644
--- a/arch/sh/configs/ecovec24_defconfig
+++ b/arch/sh/configs/ecovec24_defconfig
@@ -8,7 +8,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_CPU_SUBTYPE_SH7724=y
-CONFIG_FORCE_MAX_ZONEORDER=12
+CONFIG_ARCH_FORCE_MAX_ORDER=12
CONFIG_MEMORY_SIZE=0x10000000
CONFIG_FLATMEM_MANUAL=y
CONFIG_SH_ECOVEC=y
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig
index a8662b6927ec..97b7356639ed 100644
--- a/arch/sh/configs/sdk7786_defconfig
+++ b/arch/sh/configs/sdk7786_defconfig
@@ -16,7 +16,6 @@ CONFIG_CPUSETS=y
# CONFIG_PROC_PID_CPUSET is not set
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_MEMCG=y
-CONFIG_CGROUP_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_CGROUP=y
diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig
index cb2f56468fe0..be478f3148f2 100644
--- a/arch/sh/configs/urquell_defconfig
+++ b/arch/sh/configs/urquell_defconfig
@@ -14,7 +14,6 @@ CONFIG_CPUSETS=y
# CONFIG_PROC_PID_CPUSET is not set
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_MEMCG=y
-CONFIG_CGROUP_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_BLK_DEV_INITRD=y
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index ba569cfb4368..411fdc0901f7 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -18,7 +18,7 @@ config PAGE_OFFSET
default "0x80000000" if MMU
default "0x00000000"
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
range 9 64 if PAGE_SIZE_16KB
default "9" if PAGE_SIZE_16KB
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 1c852bb530ec..4d3d1af90d52 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -269,7 +269,7 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y if SPARC64
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
default "13"
help
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index bc38f79ca3a3..ad449173a1a1 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -584,21 +584,19 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
void flush_tlb_mm(struct mm_struct *mm)
{
- struct vm_area_struct *vma = mm->mmap;
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
- while (vma != NULL) {
+ for_each_vma(vmi, vma)
fix_range(mm, vma->vm_start, vma->vm_end, 0);
- vma = vma->vm_next;
- }
}
void force_flush_all(void)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma = mm->mmap;
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
- while (vma != NULL) {
+ for_each_vma(vmi, vma)
fix_range(mm, vma->vm_start, vma->vm_end, 1);
- vma = vma->vm_next;
- }
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 088af7c84e5d..6d1879ef933a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -85,6 +85,7 @@ config X86
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_PTE_DEVMAP if X86_64
select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_NONLEAF_PMD_YOUNG if PGTABLE_LEVELS > 2
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
select ARCH_HAS_COPY_MC if X86_64
select ARCH_HAS_SET_MEMORY
@@ -130,7 +131,9 @@ config X86
select CLKEVT_I8253
select CLOCKSOURCE_VALIDATE_LAST_CYCLE
select CLOCKSOURCE_WATCHDOG
- select DCACHE_WORD_ACCESS
+ # Word-size accesses may read uninitialized data past the trailing \0
+ # in strings and cause false KMSAN reports.
+ select DCACHE_WORD_ACCESS if !KMSAN
select DYNAMIC_SIGFRAME
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
@@ -168,6 +171,7 @@ config X86
select HAVE_ARCH_KASAN if X86_64
select HAVE_ARCH_KASAN_VMALLOC if X86_64
select HAVE_ARCH_KFENCE
+ select HAVE_ARCH_KMSAN if X86_64
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
@@ -328,6 +332,10 @@ config GENERIC_ISA_DMA
def_bool y
depends on ISA_DMA_API
+config GENERIC_CSUM
+ bool
+ default y if KMSAN || KASAN
+
config GENERIC_BUG
def_bool y
depends on BUG
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index ffec8bb01ba8..9860ca5979f8 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -12,6 +12,7 @@
# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
+KMSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Kernel does not boot with kcov instrumentation here.
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 35ce1a64068b..3a261abb6d15 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -20,6 +20,7 @@
# Sanitizer runtimes are unavailable and cannot be linked for early boot code.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
+KMSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 381d3333b996..3e88b9df8c8f 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -11,6 +11,9 @@ include $(srctree)/lib/vdso/Makefile
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
+KMSAN_SANITIZE_vclock_gettime.o := n
+KMSAN_SANITIZE_vgetcpu.o := n
+
UBSAN_SANITIZE := n
KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 1000d457c332..6292b960037b 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -127,17 +127,17 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
mmap_read_lock(mm);
-
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
unsigned long size = vma->vm_end - vma->vm_start;
if (vma_is_special_mapping(vma, &vvar_mapping))
zap_page_range(vma, vma->vm_start, size);
}
-
mmap_read_unlock(mm);
+
return 0;
}
#else
@@ -354,6 +354,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
mmap_write_lock(mm);
/*
@@ -363,7 +364,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
* We could search vma near context.vdso, but it's a slowpath,
* so let's explicitly check all VMAs to be completely sure.
*/
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
if (vma_is_special_mapping(vma, &vdso_mapping) ||
vma_is_special_mapping(vma, &vvar_mapping)) {
mmap_write_unlock(mm);
diff --git a/arch/x86/include/asm/checksum.h b/arch/x86/include/asm/checksum.h
index bca625a60186..6df6ece8a28e 100644
--- a/arch/x86/include/asm/checksum.h
+++ b/arch/x86/include/asm/checksum.h
@@ -1,9 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
-#define HAVE_CSUM_COPY_USER
-#define _HAVE_ARCH_CSUM_AND_COPY
-#ifdef CONFIG_X86_32
-# include <asm/checksum_32.h>
+#ifdef CONFIG_GENERIC_CSUM
+# include <asm-generic/checksum.h>
#else
-# include <asm/checksum_64.h>
+# define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
+# define HAVE_CSUM_COPY_USER
+# define _HAVE_ARCH_CSUM_AND_COPY
+# ifdef CONFIG_X86_32
+# include <asm/checksum_32.h>
+# else
+# include <asm/checksum_64.h>
+# endif
#endif
diff --git a/arch/x86/include/asm/kmsan.h b/arch/x86/include/asm/kmsan.h
new file mode 100644
index 000000000000..8fa6ac0e2d76
--- /dev/null
+++ b/arch/x86/include/asm/kmsan.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * x86 KMSAN support.
+ *
+ * Copyright (C) 2022, Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ */
+
+#ifndef _ASM_X86_KMSAN_H
+#define _ASM_X86_KMSAN_H
+
+#ifndef MODULE
+
+#include <asm/cpu_entry_area.h>
+#include <asm/processor.h>
+#include <linux/mmzone.h>
+
+DECLARE_PER_CPU(char[CPU_ENTRY_AREA_SIZE], cpu_entry_area_shadow);
+DECLARE_PER_CPU(char[CPU_ENTRY_AREA_SIZE], cpu_entry_area_origin);
+
+/*
+ * Functions below are declared in the header to make sure they are inlined.
+ * They all are called from kmsan_get_metadata() for every memory access in
+ * the kernel, so speed is important here.
+ */
+
+/*
+ * Compute metadata addresses for the CPU entry area on x86.
+ */
+static inline void *arch_kmsan_get_meta_or_null(void *addr, bool is_origin)
+{
+ unsigned long addr64 = (unsigned long)addr;
+ char *metadata_array;
+ unsigned long off;
+ int cpu;
+
+ if ((addr64 < CPU_ENTRY_AREA_BASE) ||
+ (addr64 >= (CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE)))
+ return NULL;
+ cpu = (addr64 - CPU_ENTRY_AREA_BASE) / CPU_ENTRY_AREA_SIZE;
+ off = addr64 - (unsigned long)get_cpu_entry_area(cpu);
+ if ((off < 0) || (off >= CPU_ENTRY_AREA_SIZE))
+ return NULL;
+ metadata_array = is_origin ? cpu_entry_area_origin :
+ cpu_entry_area_shadow;
+ return &per_cpu(metadata_array[off], cpu);
+}
+
+/*
+ * Taken from arch/x86/mm/physaddr.h to avoid using an instrumented version.
+ */
+static inline bool kmsan_phys_addr_valid(unsigned long addr)
+{
+ if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+ return !(addr >> boot_cpu_data.x86_phys_bits);
+ else
+ return true;
+}
+
+/*
+ * Taken from arch/x86/mm/physaddr.c to avoid using an instrumented version.
+ */
+static inline bool kmsan_virt_addr_valid(void *addr)
+{
+ unsigned long x = (unsigned long)addr;
+ unsigned long y = x - __START_KERNEL_map;
+
+ /* use the carry flag to determine if x was < __START_KERNEL_map */
+ if (unlikely(x > y)) {
+ x = y + phys_base;
+
+ if (y >= KERNEL_IMAGE_SIZE)
+ return false;
+ } else {
+ x = y + (__START_KERNEL_map - PAGE_OFFSET);
+
+ /* carry flag will be set if starting x was >= PAGE_OFFSET */
+ if ((x > y) || !kmsan_phys_addr_valid(x))
+ return false;
+ }
+
+ return pfn_valid(x >> PAGE_SHIFT);
+}
+
+#endif /* !MODULE */
+
+#endif /* _ASM_X86_KMSAN_H */
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index baa70451b8df..198e03e59ca1 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -8,6 +8,8 @@
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
+#include <linux/kmsan-checks.h>
+
/* duplicated to the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;
@@ -47,6 +49,11 @@ void clear_page_erms(void *page);
static inline void clear_page(void *page)
{
+ /*
+ * Clean up KMSAN metadata for the page being cleared. The assembly call
+ * below clobbers @page, so we perform unpoisoning before it.
+ */
+ kmsan_unpoison_memory(page, PAGE_SIZE);
alternative_call_2(clear_page_orig,
clear_page_rep, X86_FEATURE_REP_GOOD,
clear_page_erms, X86_FEATURE_ERMS,
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index e896ebef8c24..28421a887209 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -256,10 +256,10 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
-#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
-#define __swp_type(x) (((x).val) & 0x1f)
-#define __swp_offset(x) ((x).val >> 5)
-#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
+#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
+#define __swp_type(x) (((x).val) & ((1UL << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x) ((x).val >> SWP_TYPE_BITS)
+#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << SWP_TYPE_BITS})
/*
* Normally, __swp_entry() converts from arch-independent swp_entry_t to
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 44e2d6f1dbaa..5059799bebe3 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -815,7 +815,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
- return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
+ return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) !=
+ (_KERNPG_TABLE & ~_PAGE_ACCESSED);
}
static inline unsigned long pages_to_mb(unsigned long npg)
@@ -1431,10 +1432,10 @@ static inline bool arch_has_pfn_modify_check(void)
return boot_cpu_has_bug(X86_BUG_L1TF);
}
-#define arch_faults_on_old_pte arch_faults_on_old_pte
-static inline bool arch_faults_on_old_pte(void)
+#define arch_has_hw_pte_young arch_has_hw_pte_young
+static inline bool arch_has_hw_pte_young(void)
{
- return false;
+ return true;
}
#ifdef CONFIG_PAGE_TABLE_CHECK
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 70e360a2e5fb..04f36063ad54 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -139,7 +139,52 @@ extern unsigned int ptrs_per_p4d;
# define VMEMMAP_START __VMEMMAP_BASE_L4
#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */
-#define VMALLOC_END (VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)
+/*
+ * End of the region for which vmalloc page tables are pre-allocated.
+ * For non-KMSAN builds, this is the same as VMALLOC_END.
+ * For KMSAN builds, VMALLOC_START..VMEMORY_END is 4 times bigger than
+ * VMALLOC_START..VMALLOC_END (see below).
+ */
+#define VMEMORY_END (VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1)
+
+#ifndef CONFIG_KMSAN
+#define VMALLOC_END VMEMORY_END
+#else
+/*
+ * In KMSAN builds the vmalloc area is four times smaller, and the remaining 3/4
+ * are used to keep the metadata for virtual pages. The memory formerly
+ * belonging to vmalloc area is now laid out as follows:
+ *
+ * 1st quarter: VMALLOC_START to VMALLOC_END - new vmalloc area
+ * 2nd quarter: KMSAN_VMALLOC_SHADOW_START to
+ * VMALLOC_END+KMSAN_VMALLOC_SHADOW_OFFSET - vmalloc area shadow
+ * 3rd quarter: KMSAN_VMALLOC_ORIGIN_START to
+ * VMALLOC_END+KMSAN_VMALLOC_ORIGIN_OFFSET - vmalloc area origins
+ * 4th quarter: KMSAN_MODULES_SHADOW_START to KMSAN_MODULES_ORIGIN_START
+ * - shadow for modules,
+ * KMSAN_MODULES_ORIGIN_START to
+ * KMSAN_MODULES_ORIGIN_START + MODULES_LEN - origins for modules.
+ */
+#define VMALLOC_QUARTER_SIZE ((VMALLOC_SIZE_TB << 40) >> 2)
+#define VMALLOC_END (VMALLOC_START + VMALLOC_QUARTER_SIZE - 1)
+
+/*
+ * vmalloc metadata addresses are calculated by adding shadow/origin offsets
+ * to vmalloc address.
+ */
+#define KMSAN_VMALLOC_SHADOW_OFFSET VMALLOC_QUARTER_SIZE
+#define KMSAN_VMALLOC_ORIGIN_OFFSET (VMALLOC_QUARTER_SIZE << 1)
+
+#define KMSAN_VMALLOC_SHADOW_START (VMALLOC_START + KMSAN_VMALLOC_SHADOW_OFFSET)
+#define KMSAN_VMALLOC_ORIGIN_START (VMALLOC_START + KMSAN_VMALLOC_ORIGIN_OFFSET)
+
+/*
+ * The shadow and origin regions for modules are placed back to back in the
+ * last 1/4 of the vmalloc space.
+ */
+#define KMSAN_MODULES_SHADOW_START (VMALLOC_END + KMSAN_VMALLOC_ORIGIN_OFFSET + 1)
+#define KMSAN_MODULES_ORIGIN_START (KMSAN_MODULES_SHADOW_START + MODULES_LEN)
+#endif /* CONFIG_KMSAN */
#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
/* The module sections end with the start of the fixmap */
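A minimal sketch of the address arithmetic this quartered layout implies (illustrative helpers, not part of the patch): the shadow of a vmalloc address lives one quarter above it and the origin two quarters above, using the offsets defined above.

/* Illustrative only: locating KMSAN metadata for a vmalloc address. */
static inline void *demo_vmalloc_shadow(void *addr)
{
	return (char *)addr + KMSAN_VMALLOC_SHADOW_OFFSET;
}

static inline void *demo_vmalloc_origin(void *addr)
{
	return (char *)addr + KMSAN_VMALLOC_ORIGIN_OFFSET;
}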
diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
index 6a9ccc1b2be5..64df897c0ee3 100644
--- a/arch/x86/include/asm/sparsemem.h
+++ b/arch/x86/include/asm/sparsemem.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_SPARSEMEM_H
#define _ASM_X86_SPARSEMEM_H
+#include <linux/types.h>
+
#ifdef CONFIG_SPARSEMEM
/*
* generic non-linear memory support:
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index 6e450827f677..3b87d889b6e1 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -11,11 +11,23 @@
function. */
#define __HAVE_ARCH_MEMCPY 1
+#if defined(__SANITIZE_MEMORY__)
+#undef memcpy
+void *__msan_memcpy(void *dst, const void *src, size_t size);
+#define memcpy __msan_memcpy
+#else
extern void *memcpy(void *to, const void *from, size_t len);
+#endif
extern void *__memcpy(void *to, const void *from, size_t len);
#define __HAVE_ARCH_MEMSET
+#if defined(__SANITIZE_MEMORY__)
+extern void *__msan_memset(void *s, int c, size_t n);
+#undef memset
+#define memset __msan_memset
+#else
void *memset(void *s, int c, size_t n);
+#endif
void *__memset(void *s, int c, size_t n);
#define __HAVE_ARCH_MEMSET16
@@ -55,7 +67,13 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
}
#define __HAVE_ARCH_MEMMOVE
+#if defined(__SANITIZE_MEMORY__)
+#undef memmove
+void *__msan_memmove(void *dest, const void *src, size_t len);
+#define memmove __msan_memmove
+#else
void *memmove(void *dest, const void *src, size_t count);
+#endif
void *__memmove(void *dest, const void *src, size_t count);
int memcmp(const void *cs, const void *ct, size_t count);
@@ -64,8 +82,7 @@ char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);
-#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
-
+#if (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__))
/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the non-instrumented versions of the mem* functions.
@@ -73,7 +90,9 @@ int strcmp(const char *cs, const char *ct);
#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#undef memmove
#define memmove(dst, src, len) __memmove(dst, src, len)
+#undef memset
#define memset(s, c, n) __memset(s, c, n)
#ifndef __NO_FORTIFY
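A hedged sketch of what the interception above means for an instrumented translation unit: when the compiler defines __SANITIZE_MEMORY__, a plain memcpy() call is rewritten to __msan_memcpy(), which the KMSAN runtime provides so that shadow and origin are copied along with the data. copy_blob() is a hypothetical caller, not kernel code.

static void copy_blob(void *dst, const void *src, size_t len)
{
	/* Expands to __msan_memcpy(dst, src, len) under KMSAN. */
	memcpy(dst, src, len);
}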
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 1ec6a9ea2328..8bc614cfe21b 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -5,6 +5,7 @@
* User space memory access functions
*/
#include <linux/compiler.h>
+#include <linux/instrumented.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
@@ -103,6 +104,7 @@ extern int __get_user_bad(void);
: "=a" (__ret_gu), "=r" (__val_gu), \
ASM_CALL_CONSTRAINT \
: "0" (ptr), "i" (sizeof(*(ptr)))); \
+ instrument_get_user(__val_gu); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
})
@@ -192,9 +194,11 @@ extern void __put_user_nocheck_8(void);
int __ret_pu; \
void __user *__ptr_pu; \
register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX); \
- __chk_user_ptr(ptr); \
- __ptr_pu = (ptr); \
- __val_pu = (x); \
+ __typeof__(*(ptr)) __x = (x); /* eval x once */ \
+ __typeof__(ptr) __ptr = (ptr); /* eval ptr once */ \
+ __chk_user_ptr(__ptr); \
+ __ptr_pu = __ptr; \
+ __val_pu = __x; \
asm volatile("call __" #fn "_%P[size]" \
: "=c" (__ret_pu), \
ASM_CALL_CONSTRAINT \
@@ -202,6 +206,7 @@ extern void __put_user_nocheck_8(void);
"r" (__val_pu), \
[size] "i" (sizeof(*(ptr))) \
:"ebx"); \
+ instrument_put_user(__x, __ptr, sizeof(*(ptr))); \
__builtin_expect(__ret_pu, 0); \
})
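The __x/__ptr temporaries matter because the surrounding put_user() helper is a macro: without the snapshot, an argument with side effects such as put_user(i++, p) would be evaluated twice, and instrument_put_user() could observe a different value from the one actually stored. A minimal illustration of the idiom (illustrative macro, not the kernel one):

/* Illustrative only: evaluate @x exactly once inside a statement expression. */
#define store_once(x, ptr) ({			\
	__typeof__(*(ptr)) __val = (x);		\
	*(ptr) = __val;				\
	(void)0;				\
})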
@@ -248,23 +253,25 @@ extern void __put_user_nocheck_8(void);
#define __put_user_size(x, ptr, size, label) \
do { \
+ __typeof__(*(ptr)) __x = (x); /* eval x once */ \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
- __put_user_goto(x, ptr, "b", "iq", label); \
+ __put_user_goto(__x, ptr, "b", "iq", label); \
break; \
case 2: \
- __put_user_goto(x, ptr, "w", "ir", label); \
+ __put_user_goto(__x, ptr, "w", "ir", label); \
break; \
case 4: \
- __put_user_goto(x, ptr, "l", "ir", label); \
+ __put_user_goto(__x, ptr, "l", "ir", label); \
break; \
case 8: \
- __put_user_goto_u64(x, ptr, label); \
+ __put_user_goto_u64(__x, ptr, label); \
break; \
default: \
__put_user_bad(); \
} \
+ instrument_put_user(__x, ptr, size); \
} while (0)
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
@@ -305,6 +312,7 @@ do { \
default: \
(x) = __get_user_bad(); \
} \
+ instrument_get_user(x); \
} while (0)
#define __get_user_asm(x, addr, itype, ltype, label) \
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index f4479f6415d7..f901658d9f7c 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -29,6 +29,8 @@ KASAN_SANITIZE_sev.o := n
# With some compiler versions the generated code results in boot hangs, caused
# by several compilation units. To be safe, disable all instrumentation.
KCSAN_SANITIZE := n
+KMSAN_SANITIZE_head$(BITS).o := n
+KMSAN_SANITIZE_nmi.o := n
# If instrumentation of this dir is enabled, boot hangs during first second.
# Probably could be more selective here, but note that files related to irqs,
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 9661e3e802be..f10a921ee756 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -12,6 +12,7 @@ endif
# If these files are instrumented, boot hangs during the first second.
KCOV_INSTRUMENT_common.o := n
KCOV_INSTRUMENT_perf_event.o := n
+KMSAN_SANITIZE_common.o := n
# As above, instrumenting secondary CPU boot code causes boot hangs.
KCSAN_SANITIZE_common.o := n
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index b3dba35f466e..0bf6779187dd 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -177,6 +177,12 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
}
}
+/*
+ * This function reads pointers from the stack and dereferences them. The
+ * pointers may not have their KMSAN shadow set up properly, which may result
+ * in false positive reports. Disable instrumentation to avoid those.
+ */
+__no_kmsan_checks
static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, const char *log_lvl)
{
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 1962008fe743..6b3418bff326 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -553,6 +553,7 @@ void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp, bool x32)
* Kprobes not supported here. Set the probe on schedule instead.
* Function graph tracer not supported too.
*/
+__no_kmsan_checks
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 3bacd935f840..4c1bcb6053fc 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -95,7 +95,7 @@ void __init tboot_probe(void)
static pgd_t *tboot_pg_dir;
static struct mm_struct tboot_mm = {
- .mm_rb = RB_ROOT,
+ .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, tboot_mm.mmap_lock),
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 8e1c50c86e5d..d8ba93778ae3 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -183,6 +183,16 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)
}
#endif
+/*
+ * While walking the stack, KMSAN may stomp on stale locals from other
+ * functions that were marked as uninitialized upon function exit, and
+ * now hold the call frame information for the current function (e.g. the frame
+ * pointer). Because KMSAN does not specifically mark call frames as
+ * initialized, false positive reports are possible. To prevent such reports,
+ * we mark the functions scanning the stack (here and below) with
+ * __no_kmsan_checks.
+ */
+__no_kmsan_checks
static bool update_stack_state(struct unwind_state *state,
unsigned long *next_bp)
{
@@ -250,6 +260,7 @@ static bool update_stack_state(struct unwind_state *state,
return true;
}
+__no_kmsan_checks
bool unwind_next_frame(struct unwind_state *state)
{
struct pt_regs *regs;
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index f76747862bd2..7ba5f61d7273 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -65,7 +65,9 @@ ifneq ($(CONFIG_X86_CMPXCHG64),y)
endif
else
obj-y += iomap_copy_64.o
+ifneq ($(CONFIG_GENERIC_CSUM),y)
lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
+endif
lib-y += clear_page_64.o copy_page_64.o
lib-y += memmove_64.o memset_64.o
lib-y += copy_user_64.o
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c
index 3e2f33fc33de..e0411a3774d4 100644
--- a/arch/x86/lib/iomem.c
+++ b/arch/x86/lib/iomem.c
@@ -1,6 +1,7 @@
#include <linux/string.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/kmsan-checks.h>
#define movs(type,to,from) \
asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory")
@@ -37,6 +38,8 @@ static void string_memcpy_fromio(void *to, const volatile void __iomem *from, si
n-=2;
}
rep_movs(to, (const void *)from, n);
+ /* KMSAN must treat values read from devices as initialized. */
+ kmsan_unpoison_memory(to, n);
}
static void string_memcpy_toio(volatile void __iomem *to, const void *from, size_t n)
@@ -44,6 +47,8 @@ static void string_memcpy_toio(volatile void __iomem *to, const void *from, size
if (unlikely(!n))
return;
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(from, n);
/* Align any unaligned destination IO */
if (unlikely(1 & (unsigned long)to)) {
movs("b", to, from);
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 829c1409ffbd..c80febc44cd2 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -14,6 +14,8 @@ KASAN_SANITIZE_pgprot.o := n
# Disable KCSAN entirely, because otherwise we get warnings that some functions
# reference __initdata sections.
KCSAN_SANITIZE := n
+# Avoid recursion by not calling KMSAN hooks for CEA code.
+KMSAN_SANITIZE_cpu_entry_area.o := n
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_mem_encrypt.o = -pg
@@ -44,6 +46,9 @@ obj-$(CONFIG_HIGHMEM) += highmem_32.o
KASAN_SANITIZE_kasan_init_$(BITS).o := n
obj-$(CONFIG_KASAN) += kasan_init_$(BITS).o
+KMSAN_SANITIZE_kmsan_shadow.o := n
+obj-$(CONFIG_KMSAN) += kmsan_shadow.o
+
obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a498ae1fbe66..7b0d4ab894c8 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -260,7 +260,7 @@ static noinline int vmalloc_fault(unsigned long address)
}
NOKPROBE_SYMBOL(vmalloc_fault);
-void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
+static void __arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
unsigned long addr;
@@ -284,6 +284,27 @@ void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
}
}
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
+{
+ __arch_sync_kernel_mappings(start, end);
+#ifdef CONFIG_KMSAN
+ /*
+ * KMSAN maintains two additional metadata page mappings for the
+ * [VMALLOC_START, VMALLOC_END) range. These mappings start at
+ * KMSAN_VMALLOC_SHADOW_START and KMSAN_VMALLOC_ORIGIN_START and
+ * have to be synced together with the vmalloc memory mapping.
+ */
+ if (start >= VMALLOC_START && end < VMALLOC_END) {
+ __arch_sync_kernel_mappings(
+ start - VMALLOC_START + KMSAN_VMALLOC_SHADOW_START,
+ end - VMALLOC_START + KMSAN_VMALLOC_SHADOW_START);
+ __arch_sync_kernel_mappings(
+ start - VMALLOC_START + KMSAN_VMALLOC_ORIGIN_START,
+ end - VMALLOC_START + KMSAN_VMALLOC_ORIGIN_START);
+ }
+#endif
+}
+
static bool low_pfn(unsigned long pfn)
{
return pfn < max_low_pfn;
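A sketch of the offset translation performed above (illustrative helper, not part of the patch): a synced vmalloc range [start, end) maps onto equally sized ranges based at KMSAN_VMALLOC_SHADOW_START and KMSAN_VMALLOC_ORIGIN_START.

static void demo_meta_range(unsigned long start, unsigned long end,
			    unsigned long meta_base,
			    unsigned long *meta_start, unsigned long *meta_end)
{
	*meta_start = start - VMALLOC_START + meta_base;
	*meta_end = end - VMALLOC_START + meta_base;
}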
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 82a042c03824..9121bc1b9453 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -1054,7 +1054,7 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
}
#ifdef CONFIG_SWAP
-unsigned long max_swapfile_size(void)
+unsigned long arch_max_swapfile_size(void)
{
unsigned long pages;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 7ea7d4745681..3f040c6e5d13 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1288,7 +1288,7 @@ static void __init preallocate_vmalloc_pages(void)
unsigned long addr;
const char *lvl;
- for (addr = VMALLOC_START; addr <= VMALLOC_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+ for (addr = VMALLOC_START; addr <= VMEMORY_END; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
pgd_t *pgd = pgd_offset_k(addr);
p4d_t *p4d;
pud_t *pud;
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 1ad0228f8ceb..78c5bc654cff 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -17,6 +17,7 @@
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
+#include <linux/kmsan.h>
#include <asm/set_memory.h>
#include <asm/e820/api.h>
@@ -479,6 +480,8 @@ void iounmap(volatile void __iomem *addr)
return;
}
+ kmsan_iounmap_page_range((unsigned long)addr,
+ (unsigned long)addr + get_vm_area_size(p));
memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));
/* Finally remove it */
diff --git a/arch/x86/mm/kmsan_shadow.c b/arch/x86/mm/kmsan_shadow.c
new file mode 100644
index 000000000000..bee2ec4a3bfa
--- /dev/null
+++ b/arch/x86/mm/kmsan_shadow.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * x86-specific bits of KMSAN shadow implementation.
+ *
+ * Copyright (C) 2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ */
+
+#include <asm/cpu_entry_area.h>
+#include <linux/percpu-defs.h>
+
+/*
+ * Addresses within the CPU entry area (including e.g. exception stacks) do not
+ * have struct page entries corresponding to them, so they need separate
+ * handling.
+ * arch_kmsan_get_meta_or_null() (declared in the header) maps the addresses in
+ * CPU entry area to addresses in cpu_entry_area_shadow/cpu_entry_area_origin.
+ */
+DEFINE_PER_CPU(char[CPU_ENTRY_AREA_SIZE], cpu_entry_area_shadow);
+DEFINE_PER_CPU(char[CPU_ENTRY_AREA_SIZE], cpu_entry_area_origin);
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index a932d7712d85..8525f2876fb4 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -550,7 +550,7 @@ int ptep_test_and_clear_young(struct vm_area_struct *vma,
return ret;
}
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
@@ -562,6 +562,9 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
return ret;
}
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pudp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pud_t *pudp)
{
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 83f1b6a56449..f614009d3e4e 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -10,6 +10,7 @@
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
+KMSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 12ac277282ba..bcb0c5d2abc2 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -771,7 +771,7 @@ config HIGHMEM
If unsure, say Y.
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
default "11"
help
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index 7966a58af472..1ff0c858544f 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -111,6 +111,8 @@
#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
+#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 201356faa7e6..b3c2450d6f23 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -58,6 +58,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct vm_area_struct *vmm;
+ struct vma_iterator vmi;
if (flags & MAP_FIXED) {
/* We do not accept a shared mapping if it would violate
@@ -79,15 +80,20 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
else
addr = PAGE_ALIGN(addr);
- for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
- /* At this point: (!vmm || addr < vmm->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
- if (!vmm || addr + len <= vm_start_gap(vmm))
- return addr;
+ vma_iter_init(&vmi, current->mm, addr);
+ for_each_vma(vmi, vmm) {
+ /* At this point: (addr < vmm->vm_end). */
+ if (addr + len <= vm_start_gap(vmm))
+ break;
+
addr = vmm->vm_end;
if (flags & MAP_SHARED)
addr = COLOUR_ALIGN(addr, pgoff);
}
+
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+
+ return addr;
}
#endif
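The VMA-iterator conversions in this series (here and in i915, cxl, optee and coredump below) all share the same loop shape. A minimal sketch, assuming the caller already holds mmap_read_lock(); demo_range_fully_mapped() is a hypothetical helper, not an existing kernel function:

static bool demo_range_fully_mapped(struct mm_struct *mm,
				    unsigned long addr, unsigned long end)
{
	VMA_ITERATOR(vmi, mm, addr);
	struct vm_area_struct *vma;

	for_each_vma_range(vmi, vma, end) {
		if (vma->vm_start > addr)	/* hole before this VMA */
			return false;
		addr = vma->vm_end;
	}
	return addr >= end;			/* no hole at the tail */
}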
diff --git a/block/bio.c b/block/bio.c
index 7cb7d2ff139b..ec350e040b64 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -867,6 +867,8 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
if (*same_page)
return true;
+ else if (IS_ENABLED(CONFIG_KMSAN))
+ return false;
return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}
diff --git a/block/blk.h b/block/blk.h
index 5350bf363035..d6ea0d1a6db0 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -88,6 +88,13 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;
+ /*
+ * Merging adjacent physical pages may not work correctly under KMSAN
+ * if their metadata pages aren't adjacent. Just disable merging.
+ */
+ if (IS_ENABLED(CONFIG_KMSAN))
+ return false;
+
if (addr1 + vec1->bv_len != addr2)
return false;
if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 2589ad5357df..d779667671b2 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1391,6 +1391,7 @@ endmenu
config CRYPTO_HASH_INFO
bool
+if !KMSAN # avoid false positives from assembly
if ARM
source "arch/arm/crypto/Kconfig"
endif
@@ -1412,6 +1413,7 @@ endif
if X86
source "arch/x86/crypto/Kconfig"
endif
+endif
source "drivers/crypto/Kconfig"
source "crypto/asymmetric_keys/Kconfig"
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index bc60c9cd3230..9aa0da991cfb 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -869,12 +869,6 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
}
}
-/* return true if the memory block is offlined, otherwise, return false */
-bool is_memblock_offlined(struct memory_block *mem)
-{
- return mem->state == MEM_OFFLINE;
-}
-
static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
&dev_attr_probe.attr,
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 432d40a5f910..faf3597a96da 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/hugetlb.h>
static struct bus_type node_subsys = {
.name = "node",
@@ -589,64 +590,9 @@ static const struct attribute_group *node_dev_groups[] = {
NULL
};
-#ifdef CONFIG_HUGETLBFS
-/*
- * hugetlbfs per node attributes registration interface:
- * When/if hugetlb[fs] subsystem initializes [sometime after this module],
- * it will register its per node attributes for all online nodes with
- * memory. It will also call register_hugetlbfs_with_node(), below, to
- * register its attribute registration functions with this node driver.
- * Once these hooks have been initialized, the node driver will call into
- * the hugetlb module to [un]register attributes for hot-plugged nodes.
- */
-static node_registration_func_t __hugetlb_register_node;
-static node_registration_func_t __hugetlb_unregister_node;
-
-static inline bool hugetlb_register_node(struct node *node)
-{
- if (__hugetlb_register_node &&
- node_state(node->dev.id, N_MEMORY)) {
- __hugetlb_register_node(node);
- return true;
- }
- return false;
-}
-
-static inline void hugetlb_unregister_node(struct node *node)
-{
- if (__hugetlb_unregister_node)
- __hugetlb_unregister_node(node);
-}
-
-void register_hugetlbfs_with_node(node_registration_func_t doregister,
- node_registration_func_t unregister)
-{
- __hugetlb_register_node = doregister;
- __hugetlb_unregister_node = unregister;
-}
-#else
-static inline void hugetlb_register_node(struct node *node) {}
-
-static inline void hugetlb_unregister_node(struct node *node) {}
-#endif
-
static void node_device_release(struct device *dev)
{
- struct node *node = to_node(dev);
-
-#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_HUGETLBFS)
- /*
- * We schedule the work only when a memory section is
- * onlined/offlined on this node. When we come here,
- * all the memory on this node has been offlined,
- * so we won't enqueue new work to this work.
- *
- * The work is using node->node_work, so we should
- * flush work before freeing the memory.
- */
- flush_work(&node->node_work);
-#endif
- kfree(node);
+ kfree(to_node(dev));
}
/*
@@ -665,13 +611,13 @@ static int register_node(struct node *node, int num)
node->dev.groups = node_dev_groups;
error = device_register(&node->dev);
- if (error)
+ if (error) {
put_device(&node->dev);
- else {
+ } else {
hugetlb_register_node(node);
-
compaction_register_node(node);
}
+
return error;
}
@@ -684,8 +630,8 @@ static int register_node(struct node *node, int num)
*/
void unregister_node(struct node *node)
{
+ hugetlb_unregister_node(node);
compaction_unregister_node(node);
- hugetlb_unregister_node(node); /* no-op, if memoryless node */
node_remove_accesses(node);
node_remove_caches(node);
device_unregister(&node->dev);
@@ -907,74 +853,8 @@ void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
(void *)&nid, func);
return;
}
-
-#ifdef CONFIG_HUGETLBFS
-/*
- * Handle per node hstate attribute [un]registration on transistions
- * to/from memoryless state.
- */
-static void node_hugetlb_work(struct work_struct *work)
-{
- struct node *node = container_of(work, struct node, node_work);
-
- /*
- * We only get here when a node transitions to/from memoryless state.
- * We can detect which transition occurred by examining whether the
- * node has memory now. hugetlb_register_node() already check this
- * so we try to register the attributes. If that fails, then the
- * node has transitioned to memoryless, try to unregister the
- * attributes.
- */
- if (!hugetlb_register_node(node))
- hugetlb_unregister_node(node);
-}
-
-static void init_node_hugetlb_work(int nid)
-{
- INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
-}
-
-static int node_memory_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- struct memory_notify *mnb = arg;
- int nid = mnb->status_change_nid;
-
- switch (action) {
- case MEM_ONLINE:
- case MEM_OFFLINE:
- /*
- * offload per node hstate [un]registration to a work thread
- * when transitioning to/from memoryless state.
- */
- if (nid != NUMA_NO_NODE)
- schedule_work(&node_devices[nid]->node_work);
- break;
-
- case MEM_GOING_ONLINE:
- case MEM_GOING_OFFLINE:
- case MEM_CANCEL_ONLINE:
- case MEM_CANCEL_OFFLINE:
- default:
- break;
- }
-
- return NOTIFY_OK;
-}
-#endif /* CONFIG_HUGETLBFS */
#endif /* CONFIG_MEMORY_HOTPLUG */
-#if !defined(CONFIG_MEMORY_HOTPLUG) || !defined(CONFIG_HUGETLBFS)
-static inline int node_memory_callback(struct notifier_block *self,
- unsigned long action, void *arg)
-{
- return NOTIFY_OK;
-}
-
-static void init_node_hugetlb_work(int nid) { }
-
-#endif
-
int __register_one_node(int nid)
{
int error;
@@ -993,8 +873,6 @@ int __register_one_node(int nid)
}
INIT_LIST_HEAD(&node_devices[nid]->access_list);
- /* initialize work queue for memory hot plug */
- init_node_hugetlb_work(nid);
node_init_caches(nid);
return error;
@@ -1065,13 +943,8 @@ static const struct attribute_group *cpu_root_attr_groups[] = {
NULL,
};
-#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
void __init node_dev_init(void)
{
- static struct notifier_block node_memory_callback_nb = {
- .notifier_call = node_memory_callback,
- .priority = NODE_CALLBACK_PRI,
- };
int ret, i;
BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
@@ -1081,8 +954,6 @@ void __init node_dev_init(void)
if (ret)
panic("%s() failed to register subsystem: %d\n", __func__, ret);
- register_hotmemory_notifier(&node_memory_callback_nb);
-
/*
* Create all node devices, which will properly link the node
* to applicable memory block devices and already created cpu devices.
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e551433cd107..7c74d8cba44f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -329,8 +329,8 @@ static ssize_t idle_store(struct device *dev,
if (!sysfs_streq(buf, "all")) {
/*
- * If it did not parse as 'all' try to treat it as an integer when
- * we have memory tracking enabled.
+ * If it did not parse as 'all' try to treat it as an integer
+ * when we have memory tracking enabled.
*/
u64 age_sec;
@@ -345,7 +345,10 @@ static ssize_t idle_store(struct device *dev,
if (!init_done(zram))
goto out_unlock;
- /* A cutoff_time of 0 marks everything as idle, this is the "all" behavior */
+ /*
+ * A cutoff_time of 0 marks everything as idle, this is the
+ * "all" behavior.
+ */
mark_idle(zram, cutoff_time);
rv = len;
@@ -1410,9 +1413,19 @@ compress_again:
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE);
- if (!IS_ERR((void *)handle))
+ if (IS_ERR((void *)handle))
+ return PTR_ERR((void *)handle);
+
+ if (comp_len != PAGE_SIZE)
goto compress_again;
- return PTR_ERR((void *)handle);
+ /*
+ * If the page is not compressible, we need to re-acquire the
+ * compression stream and execute the code below. The
+ * zcomp_stream_get() call disables CPU hotplug and grabs the
+ * zstrm buffer back, so that the zstrm dereference below is
+ * valid.
+ */
+ zstrm = zcomp_stream_get(zram->comp);
}
alloced_pages = zs_get_total_pages(zram->mem_pool);
@@ -1710,9 +1723,6 @@ out:
static void zram_reset_device(struct zram *zram)
{
- struct zcomp *comp;
- u64 disksize;
-
down_write(&zram->init_lock);
zram->limit_pages = 0;
@@ -1722,17 +1732,15 @@ static void zram_reset_device(struct zram *zram)
return;
}
- comp = zram->comp;
- disksize = zram->disksize;
- zram->disksize = 0;
-
set_capacity_and_notify(zram->disk, 0);
part_stat_set_all(zram->disk->part0, 0);
/* I/O operation under all of CPU are done so let's free */
- zram_meta_free(zram, disksize);
+ zram_meta_free(zram, zram->disksize);
+ zram->disksize = 0;
memset(&zram->stats, 0, sizeof(zram->stats));
- zcomp_destroy(comp);
+ zcomp_destroy(zram->comp);
+ zram->comp = NULL;
reset_bdev(zram);
up_write(&zram->init_lock);
@@ -2126,6 +2134,8 @@ static int __init zram_init(void)
{
int ret;
+ BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > BITS_PER_LONG);
+
ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
zcomp_cpu_up_prepare, zcomp_cpu_dead);
if (ret < 0)
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 80c3b43b4828..a2bda53020fd 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -30,16 +30,15 @@
/*
- * The lower ZRAM_FLAG_SHIFT bits of table.flags is for
- * object size (excluding header), the higher bits is for
- * zram_pageflags.
+ * ZRAM is mainly used for memory efficiency so we want to keep memory
+ * footprint small and thus squeeze size and zram pageflags into a flags
+ * member. The lower ZRAM_FLAG_SHIFT bits are for the object size (excluding
+ * the header), which cannot be larger than PAGE_SIZE (requiring PAGE_SHIFT
+ * bits); the higher bits are for zram_pageflags.
*
- * zram is mainly used for memory efficiency so we want to keep memory
- * footprint small so we can squeeze size and flags into a field.
- * The lower ZRAM_FLAG_SHIFT bits is for object size (excluding header),
- * the higher bits is for zram_pageflags.
+ * We use BUILD_BUG_ON() to make sure that zram pageflags don't overflow.
*/
-#define ZRAM_FLAG_SHIFT 24
+#define ZRAM_FLAG_SHIFT (PAGE_SHIFT + 1)
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
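A hedged illustration of the packing that ZRAM_FLAG_SHIFT establishes (hypothetical helpers, not the driver's code): the compressed object size fits in the low PAGE_SHIFT + 1 bits of the flags word, and every enum zram_pageflags value names a bit at or above ZRAM_FLAG_SHIFT, which is what the new BUILD_BUG_ON() checks against BITS_PER_LONG.

static inline size_t demo_obj_size(unsigned long flags)
{
	return flags & (BIT(ZRAM_FLAG_SHIFT) - 1);	/* low bits: size */
}

static inline bool demo_test_pageflag(unsigned long flags, int flag)
{
	return flags & BIT(flag);	/* @flag is a zram_pageflags bit */
}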
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index a37622060fff..4852a2dbdb27 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -11,9 +11,17 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
+#include <linux/memory-tiers.h>
#include "dax-private.h"
#include "bus.h"
+/*
+ * Default abstract distance assigned to the NUMA node onlined
+ * by DAX/kmem if the low level platform driver didn't initialize
+ * one for this NUMA node.
+ */
+#define MEMTIER_DEFAULT_DAX_ADISTANCE (MEMTIER_ADISTANCE_DRAM * 5)
+
/* Memory resource name used for add_memory_driver_managed(). */
static const char *kmem_name;
/* Set if any memory will remain added when the driver will be unloaded. */
@@ -41,6 +49,7 @@ struct dax_kmem_data {
struct resource *res[];
};
+static struct memory_dev_type *dax_slowmem_type;
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
@@ -79,11 +88,13 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
return -EINVAL;
}
+ init_node_memory_type(numa_node, dax_slowmem_type);
+
+ rc = -ENOMEM;
data = kzalloc(struct_size(data, res, dev_dax->nr_range), GFP_KERNEL);
if (!data)
- return -ENOMEM;
+ goto err_dax_kmem_data;
- rc = -ENOMEM;
data->res_name = kstrdup(dev_name(dev), GFP_KERNEL);
if (!data->res_name)
goto err_res_name;
@@ -155,6 +166,8 @@ err_reg_mgid:
kfree(data->res_name);
err_res_name:
kfree(data);
+err_dax_kmem_data:
+ clear_node_memory_type(numa_node, dax_slowmem_type);
return rc;
}
@@ -162,6 +175,7 @@ err_res_name:
static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
int i, success = 0;
+ int node = dev_dax->target_node;
struct device *dev = &dev_dax->dev;
struct dax_kmem_data *data = dev_get_drvdata(dev);
@@ -198,6 +212,14 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
kfree(data->res_name);
kfree(data);
dev_set_drvdata(dev, NULL);
+ /*
+ * Clear the memtype association on successful unplug.
+ * Otherwise, memory blocks are left behind that can be
+ * offlined/onlined later, and we need to keep the
+ * memory_dev_type for that. This implies the reference will
+ * stay around until the next reboot.
+ */
+ clear_node_memory_type(node, dax_slowmem_type);
}
}
#else
@@ -228,9 +250,22 @@ static int __init dax_kmem_init(void)
if (!kmem_name)
return -ENOMEM;
+ dax_slowmem_type = alloc_memory_type(MEMTIER_DEFAULT_DAX_ADISTANCE);
+ if (IS_ERR(dax_slowmem_type)) {
+ rc = PTR_ERR(dax_slowmem_type);
+ goto err_dax_slowmem_type;
+ }
+
rc = dax_driver_register(&device_dax_kmem_driver);
if (rc)
- kfree_const(kmem_name);
+ goto error_dax_driver;
+
+ return rc;
+
+error_dax_driver:
+ destroy_memory_type(dax_slowmem_type);
+err_dax_slowmem_type:
+ kfree_const(kmem_name);
return rc;
}
@@ -239,6 +274,7 @@ static void __exit dax_kmem_exit(void)
dax_driver_unregister(&device_dax_kmem_driver);
if (!any_hotremove_failed)
kfree_const(kmem_name);
+ destroy_memory_type(dax_slowmem_type);
}
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 11857af72859..9624735f1575 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -59,7 +59,7 @@ static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
struct mm_struct efi_mm = {
- .mm_rb = RB_ROOT,
+ .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
.write_protect_seq = SEQCNT_ZERO(efi_mm.write_protect_seq),
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 01a01be4a2a0..b1601aad7e1a 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -57,6 +57,7 @@ GCOV_PROFILE := n
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
+KMSAN_SANITIZE := n
UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 8423df021b71..d4398948f016 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -426,12 +426,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
- const unsigned long end = addr + len;
+ VMA_ITERATOR(vmi, mm, addr);
struct vm_area_struct *vma;
- int ret = -EFAULT;
mmap_read_lock(mm);
- for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+ for_each_vma_range(vmi, vma, addr + len) {
/* Check for holes, note that we also update the addr below */
if (vma->vm_start > addr)
break;
@@ -439,16 +438,13 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
break;
- if (vma->vm_end >= end) {
- ret = 0;
- break;
- }
-
addr = vma->vm_end;
}
mmap_read_unlock(mm);
- return ret;
+ if (vma)
+ return -EFAULT;
+ return 0;
}
/*
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 250e213cc80c..3e19344eda93 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/kmsan-checks.h>
#include <linux/serio.h>
#include <linux/i8042.h>
#include <linux/libps2.h>
@@ -294,9 +295,11 @@ int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command)
serio_pause_rx(ps2dev->serio);
- if (param)
+ if (param) {
for (i = 0; i < receive; i++)
param[i] = ps2dev->cmdbuf[(receive - 1) - i];
+ kmsan_unpoison_memory(param, receive);
+ }
if (ps2dev->cmdcnt &&
(command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) {
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 60c829113299..2c64f55cf01f 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -280,22 +280,6 @@ void cxl_handle_fault(struct work_struct *fault_work)
mmput(mm);
}
-static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
-{
- struct mm_struct *mm;
-
- mm = get_mem_context(ctx);
- if (mm == NULL) {
- pr_devel("cxl_prefault_one unable to get mm %i\n",
- pid_nr(ctx->pid));
- return;
- }
-
- cxl_fault_segment(ctx, mm, ea);
-
- mmput(mm);
-}
-
static u64 next_segment(u64 ea, u64 vsid)
{
if (vsid & SLB_VSID_B_1T)
@@ -306,23 +290,16 @@ static u64 next_segment(u64 ea, u64 vsid)
return ea + 1;
}
-static void cxl_prefault_vma(struct cxl_context *ctx)
+static void cxl_prefault_vma(struct cxl_context *ctx, struct mm_struct *mm)
{
u64 ea, last_esid = 0;
struct copro_slb slb;
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
int rc;
- struct mm_struct *mm;
-
- mm = get_mem_context(ctx);
- if (mm == NULL) {
- pr_devel("cxl_prefault_vm unable to get mm %i\n",
- pid_nr(ctx->pid));
- return;
- }
mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
for (ea = vma->vm_start; ea < vma->vm_end;
ea = next_segment(ea, slb.vsid)) {
rc = copro_calculate_slb(mm, ea, &slb);
@@ -337,20 +314,28 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
}
}
mmap_read_unlock(mm);
-
- mmput(mm);
}
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
+ struct mm_struct *mm = get_mem_context(ctx);
+
+ if (mm == NULL) {
+ pr_devel("cxl_prefault unable to get mm %i\n",
+ pid_nr(ctx->pid));
+ return;
+ }
+
switch (ctx->afu->prefault_mode) {
case CXL_PREFAULT_WED:
- cxl_prefault_one(ctx, wed);
+ cxl_fault_segment(ctx, mm, wed);
break;
case CXL_PREFAULT_ALL:
- cxl_prefault_vma(ctx);
+ cxl_prefault_vma(ctx, mm);
break;
default:
break;
}
+
+ mmput(mm);
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index cd4c410da5a5..9e63b8c43f3e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -76,6 +76,7 @@ config WIREGUARD
tristate "WireGuard secure network tunnel"
depends on NET && INET
depends on IPV6 || !IPV6
+ depends on !KMSAN # KMSAN doesn't support the crypto configs below
select NET_UDP_TUNNEL
select DST_CACHE
select CRYPTO
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index ec5219680092..85ca5b4da3cf 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -652,7 +652,7 @@ void devm_namespace_disable(struct device *dev,
struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
-#define MAX_STRUCT_PAGE_SIZE 64
+#define MAX_STRUCT_PAGE_SIZE 128
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 0e92ab4b3283..61af072ac98f 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -787,7 +787,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*
- * Also make sure size of struct page is less than 64. We
+ * Also make sure size of struct page is less than 128. We
* want to make sure we use large enough size here so that
* we don't have a dynamic reserve space depending on
* struct page size. But we also want to make sure we notice
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 28f87cd8b3ed..290b1bb0e9cd 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -492,15 +492,18 @@ static bool is_normal_memory(pgprot_t p)
#endif
}
-static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
+static int __check_mem_type(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
{
- while (vma && is_normal_memory(vma->vm_page_prot)) {
- if (vma->vm_end >= end)
- return 0;
- vma = vma->vm_next;
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, start);
+
+ for_each_vma_range(vmi, vma, end) {
+ if (!is_normal_memory(vma->vm_page_prot))
+ return -EINVAL;
}
- return -EINVAL;
+ return 0;
}
int optee_check_mem_type(unsigned long start, size_t num_pages)
@@ -516,8 +519,7 @@ int optee_check_mem_type(unsigned long start, size_t num_pages)
return 0;
mmap_read_lock(mm);
- rc = __check_mem_type(find_vma(mm, start),
- start + num_pages * PAGE_SIZE);
+ rc = __check_mem_type(mm, start, start + num_pages * PAGE_SIZE);
mmap_read_unlock(mm);
return rc;
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 33d62d7e3929..9f3c54032556 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/log2.h>
+#include <linux/kmsan.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
@@ -426,6 +427,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
URB_DMA_SG_COMBINED);
urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);
+ kmsan_handle_urb(urb, is_out);
if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
dev->state < USB_STATE_CONFIGURED)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index af16a7e8c67e..2e7689bb933b 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
+#include <linux/kmsan.h>
#include <linux/spinlock.h>
#include <xen/xen.h>
@@ -352,8 +353,15 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
struct scatterlist *sg,
enum dma_data_direction direction)
{
- if (!vq->use_dma_api)
+ if (!vq->use_dma_api) {
+ /*
+ * If DMA is not used, KMSAN doesn't know that the scatterlist
+ * is initialized by the hardware. Explicitly check/unpoison it
+ * depending on the direction.
+ */
+ kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
return (dma_addr_t)sg_phys(sg);
+ }
/*
* We can't use dma_map_sg, because we don't use scatterlists in
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index e88e8f6f0a33..fae50a24630b 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -282,7 +282,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
struct page, lru);
struct privcmd_mmap_entry *msg = page_address(page);
- vma = find_vma(mm, msg->va);
+ vma = vma_lookup(mm, msg->va);
rc = -EINVAL;
if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
diff --git a/fs/Kconfig b/fs/Kconfig
index a547307c1ae8..2685a4d0d353 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -235,6 +235,7 @@ config ARCH_SUPPORTS_HUGETLBFS
config HUGETLBFS
bool "HugeTLB file system support"
depends on X86 || IA64 || SPARC64 || ARCH_SUPPORTS_HUGETLBFS || BROKEN
+ depends on (SYSFS || SYSCTL)
help
hugetlbfs is a filesystem backing for HugeTLB pages, based on
ramfs. For architectures that support it, say Y here and read
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index c0a08064b0a7..f1f051ad3147 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -8,6 +8,7 @@
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
+#include <linux/pagevec.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
@@ -219,8 +220,7 @@ static noinline void end_compressed_writeback(struct inode *inode,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
unsigned long index = cb->start >> PAGE_SHIFT;
unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
- struct page *pages[16];
- unsigned long nr_pages = end_index - index + 1;
+ struct folio_batch fbatch;
const int errno = blk_status_to_errno(cb->status);
int i;
int ret;
@@ -228,24 +228,23 @@ static noinline void end_compressed_writeback(struct inode *inode,
if (errno)
mapping_set_error(inode->i_mapping, errno);
- while (nr_pages > 0) {
- ret = find_get_pages_contig(inode->i_mapping, index,
- min_t(unsigned long,
- nr_pages, ARRAY_SIZE(pages)), pages);
- if (ret == 0) {
- nr_pages -= 1;
- index += 1;
- continue;
- }
+ folio_batch_init(&fbatch);
+ while (index <= end_index) {
+ ret = filemap_get_folios(inode->i_mapping, &index, end_index,
+ &fbatch);
+
+ if (ret == 0)
+ return;
+
for (i = 0; i < ret; i++) {
+ struct folio *folio = fbatch.folios[i];
+
if (errno)
- SetPageError(pages[i]);
- btrfs_page_clamp_clear_writeback(fs_info, pages[i],
+ folio_set_error(folio);
+ btrfs_page_clamp_clear_writeback(fs_info, &folio->page,
cb->start, cb->len);
- put_page(pages[i]);
}
- nr_pages -= ret;
- index += ret;
+ folio_batch_release(&fbatch);
}
/* the inode may be gone now */
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1eae68fbae21..4dcf22e051ff 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -270,9 +270,8 @@ static int __process_pages_contig(struct address_space *mapping,
pgoff_t start_index = start >> PAGE_SHIFT;
pgoff_t end_index = end >> PAGE_SHIFT;
pgoff_t index = start_index;
- unsigned long nr_pages = end_index - start_index + 1;
unsigned long pages_processed = 0;
- struct page *pages[16];
+ struct folio_batch fbatch;
int err = 0;
int i;
@@ -281,16 +280,17 @@ static int __process_pages_contig(struct address_space *mapping,
ASSERT(processed_end && *processed_end == start);
}
- if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
+ if ((page_ops & PAGE_SET_ERROR) && start_index <= end_index)
mapping_set_error(mapping, -EIO);
- while (nr_pages > 0) {
- int found_pages;
+ folio_batch_init(&fbatch);
+ while (index <= end_index) {
+ int found_folios;
+
+ found_folios = filemap_get_folios_contig(mapping, &index,
+ end_index, &fbatch);
- found_pages = find_get_pages_contig(mapping, index,
- min_t(unsigned long,
- nr_pages, ARRAY_SIZE(pages)), pages);
- if (found_pages == 0) {
+ if (found_folios == 0) {
/*
* Only if we're going to lock these pages, we can find
* nothing at @index.
@@ -300,23 +300,20 @@ static int __process_pages_contig(struct address_space *mapping,
goto out;
}
- for (i = 0; i < found_pages; i++) {
+ for (i = 0; i < found_folios; i++) {
int process_ret;
-
+ struct folio *folio = fbatch.folios[i];
process_ret = process_one_page(fs_info, mapping,
- pages[i], locked_page, page_ops,
+ &folio->page, locked_page, page_ops,
start, end);
if (process_ret < 0) {
- for (; i < found_pages; i++)
- put_page(pages[i]);
err = -EAGAIN;
+ folio_batch_release(&fbatch);
goto out;
}
- put_page(pages[i]);
- pages_processed++;
+ pages_processed += folio_nr_pages(folio);
}
- nr_pages -= found_pages;
- index += found_pages;
+ folio_batch_release(&fbatch);
cond_resched();
}
out:
diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c
index 6fc2b77ae5c3..9a176af847d7 100644
--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -337,7 +337,7 @@ bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
*
* Even with 0 returned, the page still need extra check to make sure
* it's really the correct page, as the caller is using
- * find_get_pages_contig(), which can race with page invalidating.
+ * filemap_get_folios_contig(), which can race with page invalidating.
*/
int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
struct page *page, u64 start, u32 len)
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index f69ec4d2d6eb..350da449db08 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -4,6 +4,7 @@
*/
#include <linux/pagemap.h>
+#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
@@ -20,39 +21,40 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
unsigned long flags)
{
int ret;
- struct page *pages[16];
+ struct folio_batch fbatch;
unsigned long index = start >> PAGE_SHIFT;
unsigned long end_index = end >> PAGE_SHIFT;
- unsigned long nr_pages = end_index - index + 1;
int i;
int count = 0;
int loops = 0;
- while (nr_pages > 0) {
- ret = find_get_pages_contig(inode->i_mapping, index,
- min_t(unsigned long, nr_pages,
- ARRAY_SIZE(pages)), pages);
+ folio_batch_init(&fbatch);
+
+ while (index <= end_index) {
+ ret = filemap_get_folios_contig(inode->i_mapping, &index,
+ end_index, &fbatch);
for (i = 0; i < ret; i++) {
+ struct folio *folio = fbatch.folios[i];
+
if (flags & PROCESS_TEST_LOCKED &&
- !PageLocked(pages[i]))
+ !folio_test_locked(folio))
count++;
- if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
- unlock_page(pages[i]);
- put_page(pages[i]);
+ if (flags & PROCESS_UNLOCK && folio_test_locked(folio))
+ folio_unlock(folio);
if (flags & PROCESS_RELEASE)
- put_page(pages[i]);
+ folio_put(folio);
}
- nr_pages -= ret;
- index += ret;
+ folio_batch_release(&fbatch);
cond_resched();
loops++;
if (loops > 100000) {
printk(KERN_ERR
- "stuck in a loop, start %llu, end %llu, nr_pages %lu, ret %d\n",
- start, end, nr_pages, ret);
+ "stuck in a loop, start %llu, end %llu, ret %d\n",
+ start, end, ret);
break;
}
}
+
return count;
}
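All three btrfs call sites converted above reduce to the same loop. A minimal sketch of the filemap_get_folios_contig() pattern, assuming a generic per-folio callback; the batch holds the folio references, so one folio_batch_release() per iteration replaces the per-page put_page() calls:

static void demo_for_each_contig_folio(struct address_space *mapping,
				       pgoff_t index, pgoff_t end_index,
				       void (*fn)(struct folio *))
{
	struct folio_batch fbatch;
	unsigned int i, found;

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		found = filemap_get_folios_contig(mapping, &index,
						  end_index, &fbatch);
		if (!found)
			break;
		for (i = 0; i < found; i++)
			fn(fbatch.folios[i]);
		folio_batch_release(&fbatch);
	}
}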
diff --git a/fs/buffer.c b/fs/buffer.c
index 0a7ba84c1905..b927f6981ad1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -152,7 +152,7 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
/*
* Default synchronous end-of-IO handler.. Just mark it up-to-date and
- * unlock the buffer. This is what ll_rw_block uses too.
+ * unlock the buffer.
*/
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
@@ -491,8 +491,8 @@ int inode_has_buffers(struct inode *inode)
* all already-submitted IO to complete, but does not queue any new
* writes to the disk.
*
- * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
- * you dirty the buffers, and then use osync_inode_buffers to wait for
+ * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
+ * as you dirty the buffers, and then use osync_inode_buffers to wait for
* completion. Any other dirty buffers which are not yet queued for
* write will not be flushed to disk by the osync.
*/
@@ -562,7 +562,7 @@ void write_boundary_block(struct block_device *bdev,
struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
if (bh) {
if (buffer_dirty(bh))
- ll_rw_block(REQ_OP_WRITE, 1, &bh);
+ write_dirty_buffer(bh, 0);
put_bh(bh);
}
}
@@ -1342,23 +1342,12 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = __getblk(bdev, block, size);
if (likely(bh)) {
- ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
+ bh_readahead(bh, REQ_RAHEAD);
brelse(bh);
}
}
EXPORT_SYMBOL(__breadahead);
-void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
- gfp_t gfp)
-{
- struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
- if (likely(bh)) {
- ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
- brelse(bh);
- }
-}
-EXPORT_SYMBOL(__breadahead_gfp);
-
/**
* __bread_gfp() - reads a specified block and returns the bh
* @bdev: the block_device to read from
@@ -1817,7 +1806,7 @@ done:
/*
* The page was marked dirty, but the buffers were
* clean. Someone wrote them back by hand with
- * ll_rw_block/submit_bh. A rare case.
+ * write_dirty_buffer/submit_bh. A rare case.
*/
end_page_writeback(page);
@@ -2033,7 +2022,7 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
- ll_rw_block(REQ_OP_READ, 1, &bh);
+ bh_read_nowait(bh, 0);
*wait_bh++=bh;
}
}
@@ -2352,7 +2341,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
struct address_space *mapping = inode->i_mapping;
const struct address_space_operations *aops = mapping->a_ops;
struct page *page;
- void *fsdata;
+ void *fsdata = NULL;
int err;
err = inode_newsize_ok(inode, size);
@@ -2378,7 +2367,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
const struct address_space_operations *aops = mapping->a_ops;
unsigned int blocksize = i_blocksize(inode);
struct page *page;
- void *fsdata;
+ void *fsdata = NULL;
pgoff_t index, curidx;
loff_t curpos;
unsigned zerofrom, offset, len;
@@ -2593,11 +2582,9 @@ int block_truncate_page(struct address_space *mapping,
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
- err = -EIO;
- ll_rw_block(REQ_OP_READ, 1, &bh);
- wait_on_buffer(bh);
+ err = bh_read(bh, 0);
/* Uhhuh. Read error. Complain and punt. */
- if (!buffer_uptodate(bh))
+ if (err < 0)
goto unlock;
}
@@ -2725,61 +2712,6 @@ void submit_bh(blk_opf_t opf, struct buffer_head *bh)
}
EXPORT_SYMBOL(submit_bh);
-/**
- * ll_rw_block: low-level access to block devices (DEPRECATED)
- * @opf: block layer request operation and flags.
- * @nr: number of &struct buffer_heads in the array
- * @bhs: array of pointers to &struct buffer_head
- *
- * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
- * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE.
- * @opf contains flags modifying the detailed I/O behavior, most notably
- * %REQ_RAHEAD.
- *
- * This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit), any buffer that appears to be clean when doing a write
- * request, and any buffer that appears to be up-to-date when doing read
- * request. Further it marks as clean buffers that are processed for
- * writing (the buffer cache won't assume that they are actually clean
- * until the buffer gets unlocked).
- *
- * ll_rw_block sets b_end_io to simple completion handler that marks
- * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
- * any waiters.
- *
- * All of the buffers must be for the same device, and must also be a
- * multiple of the current approved size for the device.
- */
-void ll_rw_block(const blk_opf_t opf, int nr, struct buffer_head *bhs[])
-{
- const enum req_op op = opf & REQ_OP_MASK;
- int i;
-
- for (i = 0; i < nr; i++) {
- struct buffer_head *bh = bhs[i];
-
- if (!trylock_buffer(bh))
- continue;
- if (op == REQ_OP_WRITE) {
- if (test_clear_buffer_dirty(bh)) {
- bh->b_end_io = end_buffer_write_sync;
- get_bh(bh);
- submit_bh(opf, bh);
- continue;
- }
- } else {
- if (!buffer_uptodate(bh)) {
- bh->b_end_io = end_buffer_read_sync;
- get_bh(bh);
- submit_bh(opf, bh);
- continue;
- }
- }
- unlock_buffer(bh);
- }
-}
-EXPORT_SYMBOL(ll_rw_block);
-
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
{
lock_buffer(bh);
@@ -3026,29 +2958,69 @@ int bh_uptodate_or_lock(struct buffer_head *bh)
EXPORT_SYMBOL(bh_uptodate_or_lock);
/**
- * bh_submit_read - Submit a locked buffer for reading
+ * __bh_read - Submit read for a locked buffer
* @bh: struct buffer_head
+ * @op_flags: extra REQ_* flags to OR into REQ_OP_READ
+ * @wait: wait until the read finishes
*
- * Returns zero on success and -EIO on error.
+ * Returns zero on success or when not waiting, and -EIO on error.
*/
-int bh_submit_read(struct buffer_head *bh)
+int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
{
- BUG_ON(!buffer_locked(bh));
+ int ret = 0;
- if (buffer_uptodate(bh)) {
- unlock_buffer(bh);
- return 0;
- }
+ BUG_ON(!buffer_locked(bh));
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
- submit_bh(REQ_OP_READ, bh);
- wait_on_buffer(bh);
- if (buffer_uptodate(bh))
- return 0;
- return -EIO;
+ submit_bh(REQ_OP_READ | op_flags, bh);
+ if (wait) {
+ wait_on_buffer(bh);
+ if (!buffer_uptodate(bh))
+ ret = -EIO;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(__bh_read);
+
+/**
+ * __bh_read_batch - Submit read for a batch of unlocked buffers
+ * @nr: number of entries in the buffer batch
+ * @bhs: a batch of struct buffer_head
+ * @op_flags: extra REQ_* flags to OR into REQ_OP_READ
+ * @force_lock: force to lock each buffer if set, otherwise skip any
+ *              buffer that cannot be locked.
+ *
+ * The reads are submitted asynchronously; end_buffer_read_sync() completes each buffer.
+ */
+void __bh_read_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags, bool force_lock)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ struct buffer_head *bh = bhs[i];
+
+ if (buffer_uptodate(bh))
+ continue;
+
+ if (force_lock)
+ lock_buffer(bh);
+ else
+ if (!trylock_buffer(bh))
+ continue;
+
+ if (buffer_uptodate(bh)) {
+ unlock_buffer(bh);
+ continue;
+ }
+
+ bh->b_end_io = end_buffer_read_sync;
+ get_bh(bh);
+ submit_bh(REQ_OP_READ | op_flags, bh);
+ }
}
-EXPORT_SYMBOL(bh_submit_read);
+EXPORT_SYMBOL(__bh_read_batch);
void __init buffer_init(void)
{
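The filesystem conversions in the rest of this series all follow the same shape when replacing ll_rw_block(). A minimal sketch, assuming kernel context with linux/buffer_head.h available and a valid, referenced buffer_head (the helper name is illustrative only):

static int example_read_block(struct buffer_head *bh)
{
	/*
	 * Old pattern:
	 *	ll_rw_block(REQ_OP_READ, 1, &bh);
	 *	wait_on_buffer(bh);
	 *	if (!buffer_uptodate(bh))
	 *		return -EIO;
	 *
	 * bh_read() locks the buffer itself and returns 1 if it was
	 * already uptodate, 0 after a successful read, or -EIO on failure.
	 */
	if (bh_read(bh, 0) < 0)
		return -EIO;
	return 0;
}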
diff --git a/fs/coredump.c b/fs/coredump.c
index 1897bc445062..7bad7785e8e6 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -1100,30 +1100,20 @@ whole:
return vma->vm_end - vma->vm_start;
}
-static struct vm_area_struct *first_vma(struct task_struct *tsk,
- struct vm_area_struct *gate_vma)
-{
- struct vm_area_struct *ret = tsk->mm->mmap;
-
- if (ret)
- return ret;
- return gate_vma;
-}
-
/*
* Helper function for iterating across a vma list. It ensures that the caller
* will visit `gate_vma' prior to terminating the search.
*/
-static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
+static struct vm_area_struct *coredump_next_vma(struct ma_state *mas,
+ struct vm_area_struct *vma,
struct vm_area_struct *gate_vma)
{
- struct vm_area_struct *ret;
-
- ret = this_vma->vm_next;
- if (ret)
- return ret;
- if (this_vma == gate_vma)
+ if (gate_vma && (vma == gate_vma))
return NULL;
+
+ vma = mas_next(mas, ULONG_MAX);
+ if (vma)
+ return vma;
return gate_vma;
}
@@ -1147,9 +1137,10 @@ static void free_vma_snapshot(struct coredump_params *cprm)
*/
static bool dump_vma_snapshot(struct coredump_params *cprm)
{
- struct vm_area_struct *vma, *gate_vma;
+ struct vm_area_struct *gate_vma, *vma = NULL;
struct mm_struct *mm = current->mm;
- int i;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
+ int i = 0;
/*
* Once the stack expansion code is fixed to not change VMA bounds
@@ -1169,8 +1160,7 @@ static bool dump_vma_snapshot(struct coredump_params *cprm)
return false;
}
- for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
- vma = next_vma(vma, gate_vma), i++) {
+ while ((vma = coredump_next_vma(&mas, vma, gate_vma)) != NULL) {
struct core_vma_metadata *m = cprm->vma_meta + i;
m->start = vma->vm_start;
@@ -1178,10 +1168,10 @@ static bool dump_vma_snapshot(struct coredump_params *cprm)
m->flags = vma->vm_flags;
m->dump_size = vma_dump_size(vma, cprm->mm_flags);
m->pgoff = vma->vm_pgoff;
-
m->file = vma->vm_file;
if (m->file)
get_file(m->file);
+ i++;
}
mmap_write_unlock(mm);
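The snapshot loop above is the common maple-tree iteration shape used throughout this series. A minimal standalone sketch, assuming the caller already holds mmap_lock (the function name is illustrative):

static unsigned long example_count_vmas(struct mm_struct *mm)
{
	MA_STATE(mas, &mm->mm_mt, 0, 0);
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	/* mas_for_each() walks every VMA from index 0 up to ULONG_MAX. */
	mas_for_each(&mas, vma, ULONG_MAX)
		nr++;

	return nr;
}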
diff --git a/fs/exec.c b/fs/exec.c
index de084e485462..349a5da91efe 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -28,7 +28,6 @@
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
-#include <linux/vmacache.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
@@ -683,6 +682,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
unsigned long length = old_end - old_start;
unsigned long new_start = old_start - shift;
unsigned long new_end = old_end - shift;
+ VMA_ITERATOR(vmi, mm, new_start);
+ struct vm_area_struct *next;
struct mmu_gather tlb;
BUG_ON(new_start > new_end);
@@ -691,7 +692,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
* ensure there are no vmas between where we want to go
* and where we are
*/
- if (vma != find_vma(mm, new_start))
+ if (vma != vma_next(&vmi))
return -EFAULT;
/*
@@ -710,12 +711,13 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
lru_add_drain();
tlb_gather_mmu(&tlb, mm);
+ next = vma_next(&vmi);
if (new_end > old_start) {
/*
* when the old and new regions overlap clear from new_end.
*/
free_pgd_range(&tlb, new_end, old_end, new_end,
- vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+ next ? next->vm_start : USER_PGTABLES_CEILING);
} else {
/*
* otherwise, clean from old_start; this is done to not touch
@@ -724,7 +726,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
* for the others its just a little faster.
*/
free_pgd_range(&tlb, old_start, old_end, new_end,
- vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+ next ? next->vm_start : USER_PGTABLES_CEILING);
}
tlb_finish_mmu(&tlb);
@@ -1010,6 +1012,7 @@ static int exec_mmap(struct mm_struct *mm)
active_mm = tsk->active_mm;
tsk->active_mm = mm;
tsk->mm = mm;
+ lru_gen_add_mm(mm);
/*
* This prevents preemption while active_mm is being loaded and
* it and mm are being updated, which could cause problems for
@@ -1022,9 +1025,8 @@ static int exec_mmap(struct mm_struct *mm)
activate_mm(active_mm, mm);
if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
local_irq_enable();
- tsk->mm->vmacache_seqnum = 0;
- vmacache_flush(tsk);
task_unlock(tsk);
+ lru_gen_use_mm(mm);
if (old_mm) {
mmap_read_unlock(old_mm);
BUG_ON(active_mm != old_mm);
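For comparison with the removed vma->vm_next walks, the higher-level iterator used in shift_arg_pages() above (and in the fs/proc and userfaultfd hunks below) looks like this; a sketch only, with locking left to the caller and the function name illustrative:

static void example_print_ranges(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);	/* start iteration at address 0 */
	struct vm_area_struct *vma;

	for_each_vma(vmi, vma)
		pr_debug("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
}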
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index c17ccc19b938..5dc0a31f4a08 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -126,6 +126,7 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
struct ext2_group_desc * desc;
struct buffer_head * bh = NULL;
ext2_fsblk_t bitmap_blk;
+ int ret;
desc = ext2_get_group_desc(sb, block_group, NULL);
if (!desc)
@@ -139,10 +140,10 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
block_group, le32_to_cpu(desc->bg_block_bitmap));
return NULL;
}
- if (likely(bh_uptodate_or_lock(bh)))
+ ret = bh_read(bh, 0);
+ if (ret > 0)
return bh;
-
- if (bh_submit_read(bh) < 0) {
+ if (ret < 0) {
brelse(bh);
ext2_error(sb, __func__,
"Cannot read block bitmap - "
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 51897427a534..b4a6e0a1b945 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -776,7 +776,8 @@ static int fuse_check_page(struct page *page)
1 << PG_active |
1 << PG_workingset |
1 << PG_reclaim |
- 1 << PG_waiters))) {
+ 1 << PG_waiters |
+ LRU_GEN_MASK | LRU_REFS_MASK))) {
dump_page(page, "fuse: trying to steal weird page");
return 1;
}
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 7e70e0ba5a6c..6ed728aae9a5 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -525,8 +525,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
if (buffer_uptodate(first_bh))
goto out;
- if (!buffer_locked(first_bh))
- ll_rw_block(REQ_OP_READ | REQ_META | REQ_PRIO, 1, &first_bh);
+ bh_read_nowait(first_bh, REQ_META | REQ_PRIO);
dblock++;
extlen--;
@@ -534,9 +533,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
while (extlen) {
bh = gfs2_getbuf(gl, dblock, CREATE);
- if (!buffer_uptodate(bh) && !buffer_locked(bh))
- ll_rw_block(REQ_OP_READ | REQ_RAHEAD | REQ_META |
- REQ_PRIO, 1, &bh);
+ bh_readahead(bh, REQ_RAHEAD | REQ_META | REQ_PRIO);
brelse(bh);
dblock++;
extlen--;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index f201eaf59d0d..1ed17226d9ed 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -745,12 +745,8 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
}
if (PageUptodate(page))
set_buffer_uptodate(bh);
- if (!buffer_uptodate(bh)) {
- ll_rw_block(REQ_OP_READ | REQ_META | REQ_PRIO, 1, &bh);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
- goto unlock_out;
- }
+ if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
+ goto unlock_out;
if (gfs2_is_jdata(ip))
gfs2_trans_add_data(ip->i_gl, bh);
else
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index f7a5b5124d8a..ed57a029eab0 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -364,13 +364,155 @@ static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
return -EINVAL;
}
-static void remove_huge_page(struct page *page)
+static void hugetlb_delete_from_page_cache(struct page *page)
{
ClearPageDirty(page);
ClearPageUptodate(page);
delete_from_page_cache(page);
}
+/*
+ * Called with i_mmap_rwsem held for inode based vma maps. This makes
+ * sure the vma (and vm_mm) will not go away. We also hold the hugetlb
+ * fault mutex for the page in the mapping, so we cannot race with the
+ * page being faulted into the vma.
+ */
+static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
+ unsigned long addr, struct page *page)
+{
+ pte_t *ptep, pte;
+
+ ptep = huge_pte_offset(vma->vm_mm, addr,
+ huge_page_size(hstate_vma(vma)));
+
+ if (!ptep)
+ return false;
+
+ pte = huge_ptep_get(ptep);
+ if (huge_pte_none(pte) || !pte_present(pte))
+ return false;
+
+ if (pte_page(pte) == page)
+ return true;
+
+ return false;
+}
+
+/*
+ * Can vma_offset_start/vma_offset_end overflow on 32-bit arches?
+ * No, because the interval tree returns us only those vmas
+ * which overlap the truncated area starting at pgoff,
+ * and no vma on a 32-bit arch can span beyond the 4GB.
+ */
+static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start)
+{
+ if (vma->vm_pgoff < start)
+ return (start - vma->vm_pgoff) << PAGE_SHIFT;
+ else
+ return 0;
+}
+
+static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end)
+{
+ unsigned long t_end;
+
+ if (!end)
+ return vma->vm_end;
+
+ t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start;
+ if (t_end > vma->vm_end)
+ t_end = vma->vm_end;
+ return t_end;
+}
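As a worked example with hypothetical numbers: for a vma with vm_pgoff == 8, a hole punch starting at pgoff 12 gives v_start == (12 - 8) << PAGE_SHIFT, i.e. four pages into the vma, while any start at or below pgoff 8 gives v_start == 0; v_end is clamped to vma->vm_end in the same way.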
+
+/*
+ * Called with hugetlb fault mutex held. Therefore, no more mappings to
+ * this folio can be created while executing the routine.
+ */
+static void hugetlb_unmap_file_folio(struct hstate *h,
+ struct address_space *mapping,
+ struct folio *folio, pgoff_t index)
+{
+ struct rb_root_cached *root = &mapping->i_mmap;
+ struct hugetlb_vma_lock *vma_lock;
+ struct page *page = &folio->page;
+ struct vm_area_struct *vma;
+ unsigned long v_start;
+ unsigned long v_end;
+ pgoff_t start, end;
+
+ start = index * pages_per_huge_page(h);
+ end = (index + 1) * pages_per_huge_page(h);
+
+ i_mmap_lock_write(mapping);
+retry:
+ vma_lock = NULL;
+ vma_interval_tree_foreach(vma, root, start, end - 1) {
+ v_start = vma_offset_start(vma, start);
+ v_end = vma_offset_end(vma, end);
+
+ if (!hugetlb_vma_maps_page(vma, vma->vm_start + v_start, page))
+ continue;
+
+ if (!hugetlb_vma_trylock_write(vma)) {
+ vma_lock = vma->vm_private_data;
+ /*
+ * If we cannot get the vma lock, we need to drop
+ * i_mmap_rwsem and take the locks in order. First,
+ * take a ref on the vma_lock structure so that
+ * we can be guaranteed it will not go away when
+ * dropping i_mmap_rwsem.
+ */
+ kref_get(&vma_lock->refs);
+ break;
+ }
+
+ unmap_hugepage_range(vma, vma->vm_start + v_start, v_end,
+ NULL, ZAP_FLAG_DROP_MARKER);
+ hugetlb_vma_unlock_write(vma);
+ }
+
+ i_mmap_unlock_write(mapping);
+
+ if (vma_lock) {
+ /*
+ * Wait on vma_lock. We know it is still valid as we have
+ * a reference. We must 'open code' vma locking as we do
+ * not know if vma_lock is still attached to vma.
+ */
+ down_write(&vma_lock->rw_sema);
+ i_mmap_lock_write(mapping);
+
+ vma = vma_lock->vma;
+ if (!vma) {
+ /*
+ * If lock is no longer attached to vma, then just
+ * unlock, drop our reference and retry looking for
+ * other vmas.
+ */
+ up_write(&vma_lock->rw_sema);
+ kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
+ goto retry;
+ }
+
+ /*
+ * vma_lock is still attached to vma. Check to see if vma
+ * still maps page and if so, unmap.
+ */
+ v_start = vma_offset_start(vma, start);
+ v_end = vma_offset_end(vma, end);
+ if (hugetlb_vma_maps_page(vma, vma->vm_start + v_start, page))
+ unmap_hugepage_range(vma, vma->vm_start + v_start,
+ v_end, NULL,
+ ZAP_FLAG_DROP_MARKER);
+
+ kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
+ hugetlb_vma_unlock_write(vma);
+
+ goto retry;
+ }
+}
+
static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
zap_flags_t zap_flags)
@@ -383,32 +525,66 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
* an inclusive "last".
*/
vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
- unsigned long v_offset;
+ unsigned long v_start;
unsigned long v_end;
+ if (!hugetlb_vma_trylock_write(vma))
+ continue;
+
+ v_start = vma_offset_start(vma, start);
+ v_end = vma_offset_end(vma, end);
+
+ unmap_hugepage_range(vma, vma->vm_start + v_start, v_end,
+ NULL, zap_flags);
+
/*
- * Can the expression below overflow on 32-bit arches?
- * No, because the interval tree returns us only those vmas
- * which overlap the truncated area starting at pgoff,
- * and no vma on a 32-bit arch can span beyond the 4GB.
+ * Note that the vma lock only exists for shared/non-private
+ * vmas. Therefore, the lock is not held when calling
+ * unmap_hugepage_range for private vmas.
*/
- if (vma->vm_pgoff < start)
- v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
- else
- v_offset = 0;
-
- if (!end)
- v_end = vma->vm_end;
- else {
- v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
- + vma->vm_start;
- if (v_end > vma->vm_end)
- v_end = vma->vm_end;
- }
+ hugetlb_vma_unlock_write(vma);
+ }
+}
- unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
- NULL, zap_flags);
+/*
+ * Called with hugetlb fault mutex held.
+ * Returns true if page was actually removed, false otherwise.
+ */
+static bool remove_inode_single_folio(struct hstate *h, struct inode *inode,
+ struct address_space *mapping,
+ struct folio *folio, pgoff_t index,
+ bool truncate_op)
+{
+ bool ret = false;
+
+ /*
+ * If folio is mapped, it was faulted in after being
+ * unmapped in caller. Unmap (again) while holding
+ * the fault mutex. The mutex will prevent faults
+ * until we finish removing the folio.
+ */
+ if (unlikely(folio_mapped(folio)))
+ hugetlb_unmap_file_folio(h, mapping, folio, index);
+
+ folio_lock(folio);
+ /*
+ * We must remove the folio from the page cache before removing
+ * the region/reserve map (hugetlb_unreserve_pages). In
+ * rare out-of-memory conditions, removal of the region/reserve
+ * map could fail. Correspondingly, the subpool and global
+ * reserve usage count may then need to be adjusted.
+ */
+ VM_BUG_ON(HPageRestoreReserve(&folio->page));
+ hugetlb_delete_from_page_cache(&folio->page);
+ ret = true;
+ if (!truncate_op) {
+ if (unlikely(hugetlb_unreserve_pages(inode, index,
+ index + 1, 1)))
+ hugetlb_fix_reserve_counts(inode);
}
+
+ folio_unlock(folio);
+ return ret;
}
/*
@@ -418,10 +594,10 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
* truncation is indicated by end of range being LLONG_MAX
* In this case, we first scan the range and release found pages.
* After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
- * maps and global counts. Page faults can not race with truncation
- * in this routine. hugetlb_no_page() holds i_mmap_rwsem and prevents
- * page faults in the truncated range by checking i_size. i_size is
- * modified while holding i_mmap_rwsem.
+ * maps and global counts. Page faults can race with truncation.
+ * During faults, hugetlb_no_page() checks i_size before page allocation,
+ * and again after obtaining page table lock. It will 'back out'
+ * allocations in the truncated range.
* hole punch is indicated if end is not LLONG_MAX
* In the hole punch case we scan the range and release found pages.
* Only when releasing a page is the associated region/reserve map
@@ -451,61 +627,17 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
u32 hash = 0;
index = folio->index;
- if (!truncate_op) {
- /*
- * Only need to hold the fault mutex in the
- * hole punch case. This prevents races with
- * page faults. Races are not possible in the
- * case of truncation.
- */
- hash = hugetlb_fault_mutex_hash(mapping, index);
- mutex_lock(&hugetlb_fault_mutex_table[hash]);
- }
+ hash = hugetlb_fault_mutex_hash(mapping, index);
+ mutex_lock(&hugetlb_fault_mutex_table[hash]);
/*
- * If folio is mapped, it was faulted in after being
- * unmapped in caller. Unmap (again) now after taking
- * the fault mutex. The mutex will prevent faults
- * until we finish removing the folio.
- *
- * This race can only happen in the hole punch case.
- * Getting here in a truncate operation is a bug.
+ * Remove folio that was part of folio_batch.
*/
- if (unlikely(folio_mapped(folio))) {
- BUG_ON(truncate_op);
-
- mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_lock_write(mapping);
- mutex_lock(&hugetlb_fault_mutex_table[hash]);
- hugetlb_vmdelete_list(&mapping->i_mmap,
- index * pages_per_huge_page(h),
- (index + 1) * pages_per_huge_page(h),
- ZAP_FLAG_DROP_MARKER);
- i_mmap_unlock_write(mapping);
- }
-
- folio_lock(folio);
- /*
- * We must free the huge page and remove from page
- * cache (remove_huge_page) BEFORE removing the
- * region/reserve map (hugetlb_unreserve_pages). In
- * rare out of memory conditions, removal of the
- * region/reserve map could fail. Correspondingly,
- * the subpool and global reserve usage count can need
- * to be adjusted.
- */
- VM_BUG_ON(HPageRestoreReserve(&folio->page));
- remove_huge_page(&folio->page);
- freed++;
- if (!truncate_op) {
- if (unlikely(hugetlb_unreserve_pages(inode,
- index, index + 1, 1)))
- hugetlb_fix_reserve_counts(inode);
- }
-
- folio_unlock(folio);
- if (!truncate_op)
- mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+ if (remove_inode_single_folio(h, inode, mapping, folio,
+ index, truncate_op))
+ freed++;
+
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
folio_batch_release(&fbatch);
cond_resched();
@@ -543,8 +675,8 @@ static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
BUG_ON(offset & ~huge_page_mask(h));
pgoff = offset >> PAGE_SHIFT;
- i_mmap_lock_write(mapping);
i_size_write(inode, offset);
+ i_mmap_lock_write(mapping);
if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
ZAP_FLAG_DROP_MARKER);
@@ -703,11 +835,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
/* addr is the offset within the file (zero based) */
addr = index * hpage_size;
- /*
- * fault mutex taken here, protects against fault path
- * and hole punch. inode_lock previously taken protects
- * against truncation.
- */
+ /* mutex taken here, protects against fault path and hole punch */
hash = hugetlb_fault_mutex_hash(mapping, index);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
@@ -737,7 +865,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
}
clear_huge_page(page, addr, pages_per_huge_page(h));
__SetPageUptodate(page);
- error = huge_add_to_page_cache(page, mapping, index);
+ error = hugetlb_add_to_page_cache(page, mapping, index);
if (unlikely(error)) {
restore_reserve_on_error(h, &pseudo_vma, addr, page);
put_page(page);
@@ -749,7 +877,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
SetHPageMigratable(page);
/*
- * unlock_page because locked by huge_add_to_page_cache()
+ * unlock_page because locked by hugetlb_add_to_page_cache()
* put_page() due to reference from alloc_huge_page()
*/
unlock_page(page);
@@ -994,7 +1122,7 @@ static int hugetlbfs_error_remove_page(struct address_space *mapping,
struct inode *inode = mapping->host;
pgoff_t index = page->index;
- remove_huge_page(page);
+ hugetlb_delete_from_page_cache(page);
if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
hugetlb_fix_reserve_counts(inode);
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index b466172eec25..59b03d74ecbe 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -82,7 +82,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
return 0;
}
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
- ll_rw_block(REQ_OP_READ, haveblocks, bhs);
+ bh_read_batch(haveblocks, bhs);
curbh = 0;
curpage = 0;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index bc8270e0d7d0..2696f43e7239 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1898,19 +1898,16 @@ static int journal_get_superblock(journal_t *journal)
{
struct buffer_head *bh;
journal_superblock_t *sb;
- int err = -EIO;
+ int err;
bh = journal->j_sb_buffer;
J_ASSERT(bh != NULL);
- if (!buffer_uptodate(bh)) {
- ll_rw_block(REQ_OP_READ, 1, &bh);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh)) {
- printk(KERN_ERR
- "JBD2: IO error reading journal superblock\n");
- goto out;
- }
+ err = bh_read(bh, 0);
+ if (err < 0) {
+ printk(KERN_ERR
+ "JBD2: IO error reading journal superblock\n");
+ goto out;
}
if (buffer_verified(bh))
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 3688d16fe83b..8286a9ec122f 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -100,7 +100,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
bufs[nbufs++] = bh;
if (nbufs == MAXBUF) {
- ll_rw_block(REQ_OP_READ, nbufs, bufs);
+ bh_readahead_batch(nbufs, bufs, 0);
journal_brelse_array(bufs, nbufs);
nbufs = 0;
}
@@ -109,7 +109,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
}
if (nbufs)
- ll_rw_block(REQ_OP_READ, nbufs, bufs);
+ bh_readahead_batch(nbufs, bufs, 0);
err = 0;
failed:
@@ -152,9 +152,14 @@ static int jread(struct buffer_head **bhp, journal_t *journal,
return -ENOMEM;
if (!buffer_uptodate(bh)) {
- /* If this is a brand new buffer, start readahead.
- Otherwise, we assume we are already reading it. */
- if (!buffer_req(bh))
+ /*
+ * If this is a brand new buffer, start readahead.
+ * Otherwise, we assume we are already reading it.
+ */
+ bool need_readahead = !buffer_req(bh);
+
+ bh_read_nowait(bh, 0);
+ if (need_readahead)
do_readahead(journal, offset);
wait_on_buffer(bh);
}
@@ -688,7 +693,6 @@ static int do_one_pass(journal_t *journal,
mark_buffer_dirty(nbh);
BUFFER_TRACE(nbh, "marking uptodate");
++info->nr_replays;
- /* ll_rw_block(WRITE, 1, &nbh); */
unlock_buffer(nbh);
brelse(obh);
brelse(nbh);
diff --git a/fs/namei.c b/fs/namei.c
index 8533087e5dac..33c80d15ac85 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -5088,7 +5088,7 @@ int page_symlink(struct inode *inode, const char *symname, int len)
const struct address_space_operations *aops = mapping->a_ops;
bool nofs = !mapping_gfp_constraint(mapping, __GFP_FS);
struct page *page;
- void *fsdata;
+ void *fsdata = NULL;
int err;
unsigned int flags;
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 3267e96c256c..39b7eea2642a 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -480,41 +480,36 @@ unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
sector_t start_blk,
sector_t *blkoff)
{
- unsigned int i;
+ unsigned int i, nr_folios;
pgoff_t index;
- unsigned int nblocks_in_page;
unsigned long length = 0;
- sector_t b;
- struct pagevec pvec;
- struct page *page;
+ struct folio_batch fbatch;
+ struct folio *folio;
if (inode->i_mapping->nrpages == 0)
return 0;
index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);
- nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits);
- pagevec_init(&pvec);
+ folio_batch_init(&fbatch);
repeat:
- pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE,
- pvec.pages);
- if (pvec.nr == 0)
+ nr_folios = filemap_get_folios_contig(inode->i_mapping, &index, ULONG_MAX,
+ &fbatch);
+ if (nr_folios == 0)
return length;
- if (length > 0 && pvec.pages[0]->index > index)
- goto out;
-
- b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits);
i = 0;
do {
- page = pvec.pages[i];
+ folio = fbatch.folios[i];
- lock_page(page);
- if (page_has_buffers(page)) {
+ folio_lock(folio);
+ if (folio_buffers(folio)) {
struct buffer_head *bh, *head;
+ sector_t b;
- bh = head = page_buffers(page);
+ b = folio->index << (PAGE_SHIFT - inode->i_blkbits);
+ bh = head = folio_buffers(folio);
do {
if (b < start_blk)
continue;
@@ -529,21 +524,17 @@ repeat:
} else {
if (length > 0)
goto out_locked;
-
- b += nblocks_in_page;
}
- unlock_page(page);
+ folio_unlock(folio);
- } while (++i < pagevec_count(&pvec));
+ } while (++i < nr_folios);
- index = page->index + 1;
- pagevec_release(&pvec);
+ folio_batch_release(&fbatch);
cond_resched();
goto repeat;
out_locked:
- unlock_page(page);
-out:
- pagevec_release(&pvec);
+ folio_unlock(folio);
+ folio_batch_release(&fbatch);
return length;
}
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index 26a76ebfe58f..d5a3afbbbfd8 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -630,12 +630,9 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
bh->b_size = block_size;
off = vbo & (PAGE_SIZE - 1);
set_bh_page(bh, page, off);
- ll_rw_block(REQ_OP_READ, 1, &bh);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh)) {
- err = -EIO;
+ err = bh_read(bh, 0);
+ if (err < 0)
goto out;
- }
zero_user_segment(page, off + voff, off + block_size);
}
}
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index af4157f61927..1d65f6ef00ca 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -636,7 +636,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
!buffer_new(bh) &&
ocfs2_should_read_blk(inode, page, block_start) &&
(block_start < from || block_end > to)) {
- ll_rw_block(REQ_OP_READ, 1, &bh);
+ bh_read_nowait(bh, 0);
*wait_bh++=bh;
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index e2cc9eec287c..26b4c2bfee49 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1764,9 +1764,7 @@ static int ocfs2_get_sector(struct super_block *sb,
if (!buffer_dirty(*bh))
clear_buffer_uptodate(*bh);
unlock_buffer(*bh);
- ll_rw_block(REQ_OP_READ, 1, bh);
- wait_on_buffer(*bh);
- if (!buffer_uptodate(*bh)) {
+ if (bh_read(*bh, 0) < 0) {
mlog_errno(-EIO);
brelse(*bh);
*bh = NULL;
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index b4f109875e79..74dc0f571dc9 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -24,6 +24,7 @@
#include <linux/user_namespace.h>
#include <linux/namei.h>
#include <linux/mnt_idmapping.h>
+#include <linux/iversion.h>
static struct posix_acl **acl_by_type(struct inode *inode, int type)
{
@@ -1227,6 +1228,8 @@ int simple_set_acl(struct user_namespace *mnt_userns, struct inode *inode,
}
inode->i_ctime = current_time(inode);
+ if (IS_I_VERSION(inode))
+ inode_inc_iversion(inode);
set_cached_acl(inode, type, acl);
return 0;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 2d9429bf51fa..9e479d7d202b 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2350,6 +2350,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
GENRADIX(struct map_files_info) fa;
struct map_files_info *p;
int ret;
+ struct vma_iterator vmi;
genradix_init(&fa);
@@ -2388,7 +2389,9 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
* routine might require mmap_lock taken in might_fault().
*/
- for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
+ pos = 2;
+ vma_iter_init(&vmi, mm, 0);
+ for_each_vma(vmi, vma) {
if (!vma->vm_file)
continue;
if (++pos <= ctx->pos)
@@ -3196,6 +3199,19 @@ static int proc_pid_ksm_merging_pages(struct seq_file *m, struct pid_namespace *
return 0;
}
+static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+{
+ struct mm_struct *mm;
+
+ mm = get_task_mm(task);
+ if (mm) {
+ seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
+ mmput(mm);
+ }
+
+ return 0;
+}
#endif /* CONFIG_KSM */
#ifdef CONFIG_STACKLEAK_METRICS
@@ -3331,6 +3347,7 @@ static const struct pid_entry tgid_base_stuff[] = {
#endif
#ifdef CONFIG_KSM
ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages),
+ ONE("ksm_stat", S_IRUSR, proc_pid_ksm_stat),
#endif
};
@@ -3668,6 +3685,7 @@ static const struct pid_entry tid_base_stuff[] = {
#endif
#ifdef CONFIG_KSM
ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages),
+ ONE("ksm_stat", S_IRUSR, proc_pid_ksm_stat),
#endif
};
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 06a80f78433d..f03000764ce5 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -285,7 +285,7 @@ struct proc_maps_private {
struct task_struct *task;
struct mm_struct *mm;
#ifdef CONFIG_MMU
- struct vm_area_struct *tail_vma;
+ struct vma_iterator iter;
#endif
#ifdef CONFIG_NUMA
struct mempolicy *task_mempolicy;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 4e0023643f8b..8b4f3073f8f5 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
-#include <linux/vmacache.h>
#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
@@ -124,12 +123,26 @@ static void release_task_mempolicy(struct proc_maps_private *priv)
}
#endif
+static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
+ loff_t *ppos)
+{
+ struct vm_area_struct *vma = vma_next(&priv->iter);
+
+ if (vma) {
+ *ppos = vma->vm_start;
+ } else {
+ *ppos = -2UL;
+ vma = get_gate_vma(priv->mm);
+ }
+
+ return vma;
+}
+
static void *m_start(struct seq_file *m, loff_t *ppos)
{
struct proc_maps_private *priv = m->private;
unsigned long last_addr = *ppos;
struct mm_struct *mm;
- struct vm_area_struct *vma;
/* See m_next(). Zero at the start or after lseek. */
if (last_addr == -1UL)
@@ -153,31 +166,21 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
return ERR_PTR(-EINTR);
}
+ vma_iter_init(&priv->iter, mm, last_addr);
hold_task_mempolicy(priv);
- priv->tail_vma = get_gate_vma(mm);
-
- vma = find_vma(mm, last_addr);
- if (vma)
- return vma;
+ if (last_addr == -2UL)
+ return get_gate_vma(mm);
- return priv->tail_vma;
+ return proc_get_vma(priv, ppos);
}
static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
{
- struct proc_maps_private *priv = m->private;
- struct vm_area_struct *next, *vma = v;
-
- if (vma == priv->tail_vma)
- next = NULL;
- else if (vma->vm_next)
- next = vma->vm_next;
- else
- next = priv->tail_vma;
-
- *ppos = next ? next->vm_start : -1UL;
-
- return next;
+ if (*ppos == -2UL) {
+ *ppos = -1UL;
+ return NULL;
+ }
+ return proc_get_vma(m->private, ppos);
}
static void m_stop(struct seq_file *m, void *v)
@@ -864,7 +867,7 @@ static int show_smap(struct seq_file *m, void *v)
__show_smap(m, &mss, false);
seq_printf(m, "THPeligible: %d\n",
- hugepage_vma_check(vma, vma->vm_flags, true, false));
+ hugepage_vma_check(vma, vma->vm_flags, true, false, true));
if (arch_pkeys_enabled())
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
@@ -877,16 +880,16 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
struct mem_size_stats mss;
- struct mm_struct *mm;
+ struct mm_struct *mm = priv->mm;
struct vm_area_struct *vma;
- unsigned long last_vma_end = 0;
+ unsigned long vma_start = 0, last_vma_end = 0;
int ret = 0;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
priv->task = get_proc_task(priv->inode);
if (!priv->task)
return -ESRCH;
- mm = priv->mm;
if (!mm || !mmget_not_zero(mm)) {
ret = -ESRCH;
goto out_put_task;
@@ -899,8 +902,13 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
goto out_put_mm;
hold_task_mempolicy(priv);
+ vma = mas_find(&mas, 0);
+
+ if (unlikely(!vma))
+ goto empty_set;
- for (vma = priv->mm->mmap; vma;) {
+ vma_start = vma->vm_start;
+ do {
smap_gather_stats(vma, &mss, 0);
last_vma_end = vma->vm_end;
@@ -909,6 +917,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
* access it for write request.
*/
if (mmap_lock_is_contended(mm)) {
+ mas_pause(&mas);
mmap_read_unlock(mm);
ret = mmap_read_lock_killable(mm);
if (ret) {
@@ -952,7 +961,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
* contains last_vma_end.
* Iterate VMA' from last_vma_end.
*/
- vma = find_vma(mm, last_vma_end - 1);
+ vma = mas_find(&mas, ULONG_MAX);
/* Case 3 above */
if (!vma)
break;
@@ -966,11 +975,10 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
smap_gather_stats(vma, &mss, last_vma_end);
}
/* Case 2 above */
- vma = vma->vm_next;
- }
+ } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
- show_vma_header_prefix(m, priv->mm->mmap->vm_start,
- last_vma_end, 0, 0, 0, 0);
+empty_set:
+ show_vma_header_prefix(m, vma_start, last_vma_end, 0, 0, 0, 0);
seq_pad(m, ' ');
seq_puts(m, "[rollup]\n");
@@ -1263,6 +1271,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
return -ESRCH;
mm = get_task_mm(task);
if (mm) {
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
struct mmu_notifier_range range;
struct clear_refs_private cp = {
.type = type,
@@ -1282,7 +1291,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
}
if (type == CLEAR_REFS_SOFT_DIRTY) {
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ mas_for_each(&mas, vma, ULONG_MAX) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
continue;
vma->vm_flags &= ~VM_SOFTDIRTY;
@@ -1294,8 +1303,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
0, NULL, mm, 0, -1UL);
mmu_notifier_invalidate_range_start(&range);
}
- walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
- &cp);
+ walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
if (type == CLEAR_REFS_SOFT_DIRTY) {
mmu_notifier_invalidate_range_end(&range);
flush_tlb_mm(mm);
@@ -1418,9 +1426,19 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
if (pte_swp_uffd_wp(pte))
flags |= PM_UFFD_WP;
entry = pte_to_swp_entry(pte);
- if (pm->show_pfn)
+ if (pm->show_pfn) {
+ pgoff_t offset;
+ /*
+ * For PFN swap offsets, keep the offset field as the
+ * PFN only, to stay compatible with old smaps.
+ */
+ if (is_pfn_swap_entry(entry))
+ offset = swp_offset_pfn(entry);
+ else
+ offset = swp_offset(entry);
frame = swp_type(entry) |
- (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
+ (offset << MAX_SWAPFILES_SHIFT);
+ }
flags |= PM_SWAP;
migration = is_migration_entry(entry);
if (is_pfn_swap_entry(entry))
@@ -1477,7 +1495,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
unsigned long offset;
if (pm->show_pfn) {
- offset = swp_offset(entry) +
+ if (is_pfn_swap_entry(entry))
+ offset = swp_offset_pfn(entry);
+ else
+ offset = swp_offset(entry);
+ offset = offset +
((addr & ~PMD_MASK) >> PAGE_SHIFT);
frame = swp_type(entry) |
(offset << MAX_SWAPFILES_SHIFT);
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index a6d21fc0033c..2fd06f52b6a4 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -20,15 +20,13 @@
*/
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
struct vm_region *region;
- struct rb_node *p;
unsigned long bytes = 0, sbytes = 0, slack = 0, size;
-
- mmap_read_lock(mm);
- for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
- vma = rb_entry(p, struct vm_area_struct, vm_rb);
+ mmap_read_lock(mm);
+ for_each_vma(vmi, vma) {
bytes += kobjsize(vma);
region = vma->vm_region;
@@ -82,15 +80,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
unsigned long task_vsize(struct mm_struct *mm)
{
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
- struct rb_node *p;
unsigned long vsize = 0;
mmap_read_lock(mm);
- for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
- vma = rb_entry(p, struct vm_area_struct, vm_rb);
+ for_each_vma(vmi, vma)
vsize += vma->vm_end - vma->vm_start;
- }
mmap_read_unlock(mm);
return vsize;
}
@@ -99,14 +95,13 @@ unsigned long task_statm(struct mm_struct *mm,
unsigned long *shared, unsigned long *text,
unsigned long *data, unsigned long *resident)
{
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
struct vm_region *region;
- struct rb_node *p;
unsigned long size = kobjsize(mm);
mmap_read_lock(mm);
- for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
- vma = rb_entry(p, struct vm_area_struct, vm_rb);
+ for_each_vma(vmi, vma) {
size += kobjsize(vma);
region = vma->vm_region;
if (region) {
@@ -190,17 +185,19 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
*/
static int show_map(struct seq_file *m, void *_p)
{
- struct rb_node *p = _p;
-
- return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
+ return nommu_vma_show(m, _p);
}
static void *m_start(struct seq_file *m, loff_t *pos)
{
struct proc_maps_private *priv = m->private;
struct mm_struct *mm;
- struct rb_node *p;
- loff_t n = *pos;
+ struct vm_area_struct *vma;
+ unsigned long addr = *pos;
+
+ /* See m_next(). Zero at the start or after lseek. */
+ if (addr == -1UL)
+ return NULL;
/* pin the task and mm whilst we play with them */
priv->task = get_proc_task(priv->inode);
@@ -216,10 +213,10 @@ static void *m_start(struct seq_file *m, loff_t *pos)
return ERR_PTR(-EINTR);
}
- /* start from the Nth VMA */
- for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
- if (n-- == 0)
- return p;
+ /* start the next element from addr */
+ vma = find_vma(mm, addr);
+ if (vma)
+ return vma;
mmap_read_unlock(mm);
mmput(mm);
@@ -242,10 +239,10 @@ static void m_stop(struct seq_file *m, void *_vml)
static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
{
- struct rb_node *p = _p;
+ struct vm_area_struct *vma = _p;
- (*pos)++;
- return p ? rb_next(p) : NULL;
+ *pos = vma->vm_end;
+ return find_vma(vma->vm_mm, vma->vm_end);
}
static const struct seq_operations proc_pid_maps_ops = {
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index ba3525ccc27e..cb240eac5036 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -203,9 +203,9 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
- unsigned long maxpages, lpages, nr, loop, ret;
+ unsigned long maxpages, lpages, nr_folios, loop, ret, nr_pages, pfn;
struct inode *inode = file_inode(file);
- struct page **pages = NULL, **ptr, *page;
+ struct folio_batch fbatch;
loff_t isize;
/* the mapping mustn't extend beyond the EOF */
@@ -221,31 +221,39 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
goto out;
/* gang-find the pages */
- pages = kcalloc(lpages, sizeof(struct page *), GFP_KERNEL);
- if (!pages)
- goto out_free;
-
- nr = find_get_pages_contig(inode->i_mapping, pgoff, lpages, pages);
- if (nr != lpages)
- goto out_free_pages; /* leave if some pages were missing */
+ folio_batch_init(&fbatch);
+ nr_pages = 0;
+repeat:
+ nr_folios = filemap_get_folios_contig(inode->i_mapping, &pgoff,
+ ULONG_MAX, &fbatch);
+ if (!nr_folios) {
+ ret = -ENOSYS;
+ return ret;
+ }
+ if (ret == -ENOSYS) {
+ ret = (unsigned long) folio_address(fbatch.folios[0]);
+ pfn = folio_pfn(fbatch.folios[0]);
+ }
/* check the pages for physical adjacency */
- ptr = pages;
- page = *ptr++;
- page++;
- for (loop = lpages; loop > 1; loop--)
- if (*ptr++ != page++)
- goto out_free_pages;
+ for (loop = 0; loop < nr_folios; loop++) {
+ if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {
+ ret = -ENOSYS;
+ goto out_free; /* leave if not physically adjacent */
+ }
+ nr_pages += folio_nr_pages(fbatch.folios[loop]);
+ if (nr_pages >= lpages)
+ goto out_free; /* successfully found desired pages */
+ }
+ if (nr_pages < lpages) {
+ folio_batch_release(&fbatch);
+ goto repeat; /* loop if pages are missing */
+ }
/* okay - all conditions fulfilled */
- ret = (unsigned long) page_address(pages[0]);
-out_free_pages:
- ptr = pages;
- for (loop = nr; loop > 0; loop--)
- put_page(*ptr++);
out_free:
- kfree(pages);
+ folio_batch_release(&fbatch);
out:
return ret;
}
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 94addfcefede..9f62da7471c9 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -868,7 +868,7 @@ loop_next:
*/
if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
spin_unlock(lock);
- ll_rw_block(REQ_OP_WRITE, 1, &bh);
+ write_dirty_buffer(bh, 0);
spin_lock(lock);
}
put_bh(bh);
@@ -1054,7 +1054,7 @@ static int flush_commit_list(struct super_block *s,
if (tbh) {
if (buffer_dirty(tbh)) {
depth = reiserfs_write_unlock_nested(s);
- ll_rw_block(REQ_OP_WRITE, 1, &tbh);
+ write_dirty_buffer(tbh, 0);
reiserfs_write_lock_nested(s, depth);
}
put_bh(tbh) ;
@@ -2240,7 +2240,7 @@ abort_replay:
}
}
/* read in the log blocks, memcpy to the corresponding real block */
- ll_rw_block(REQ_OP_READ, get_desc_trans_len(desc), log_blocks);
+ bh_read_batch(get_desc_trans_len(desc), log_blocks);
for (i = 0; i < get_desc_trans_len(desc); i++) {
wait_on_buffer(log_blocks[i]);
@@ -2342,10 +2342,11 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
} else
bhlist[j++] = bh;
}
- ll_rw_block(REQ_OP_READ, j, bhlist);
+ bh = bhlist[0];
+ bh_read_nowait(bh, 0);
+ bh_readahead_batch(j - 1, &bhlist[1], 0);
for (i = 1; i < j; i++)
brelse(bhlist[i]);
- bh = bhlist[0];
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 9a293609a022..84c12a1947b2 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -579,7 +579,7 @@ static int search_by_key_reada(struct super_block *s,
if (!buffer_uptodate(bh[j])) {
if (depth == -1)
depth = reiserfs_write_unlock_nested(s);
- ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, bh + j);
+ bh_readahead(bh[j], REQ_RAHEAD);
}
brelse(bh[j]);
}
@@ -685,7 +685,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
if (!buffer_uptodate(bh) && depth == -1)
depth = reiserfs_write_unlock_nested(sb);
- ll_rw_block(REQ_OP_READ, 1, &bh);
+ bh_read_nowait(bh, 0);
wait_on_buffer(bh);
if (depth != -1)
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index da1e72494e30..929acce6e731 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1702,9 +1702,7 @@ static int read_super_block(struct super_block *s, int offset)
/* after journal replay, reread all bitmap and super blocks */
static int reread_meta_blocks(struct super_block *s)
{
- ll_rw_block(REQ_OP_READ, 1, &SB_BUFFER_WITH_SB(s));
- wait_on_buffer(SB_BUFFER_WITH_SB(s));
- if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
+ if (bh_read(SB_BUFFER_WITH_SB(s), 0) < 0) {
reiserfs_warning(s, "reiserfs-2504", "error reading the super");
return 1;
}
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index cad3772f9dbe..be640f4b2f2c 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -130,7 +130,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
brelse(tmp);
}
if (num) {
- ll_rw_block(REQ_OP_READ | REQ_RAHEAD, num, bha);
+ bh_readahead_batch(num, bha, REQ_RAHEAD);
for (i = 0; i < num; i++)
brelse(bha[i]);
}
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index a2adf6293093..16bcf2c6b8b3 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -89,7 +89,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
brelse(tmp);
}
if (num) {
- ll_rw_block(REQ_OP_READ | REQ_RAHEAD, num, bha);
+ bh_readahead_batch(num, bha, REQ_RAHEAD);
for (i = 0; i < num; i++)
brelse(bha[i]);
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 8d06daed549f..dce6ae9ae306 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1211,13 +1211,7 @@ struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
if (!bh)
return NULL;
- if (buffer_uptodate(bh))
- return bh;
-
- ll_rw_block(REQ_OP_READ, 1, &bh);
-
- wait_on_buffer(bh);
- if (buffer_uptodate(bh))
+ if (bh_read(bh, 0) >= 0)
return bh;
brelse(bh);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index bd810d8239f2..2436e3f82147 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -295,14 +295,10 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
if (!buffer_mapped(bh))
map_bh(bh, inode->i_sb, oldb + pos);
- if (!buffer_uptodate(bh)) {
- ll_rw_block(REQ_OP_READ, 1, &bh);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh)) {
- ufs_error(inode->i_sb, __func__,
- "read of block failed\n");
- break;
- }
+ if (bh_read(bh, 0) < 0) {
+ ufs_error(inode->i_sb, __func__,
+ "read of block failed\n");
+ break;
}
UFSD(" change from %llu to %llu, pos %u\n",
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 0c1d33c4f74c..07c81ab3fd4d 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -30,6 +30,7 @@
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/swapops.h>
+#include <linux/miscdevice.h>
int sysctl_unprivileged_userfaultfd __read_mostly;
@@ -415,13 +416,8 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
if (ctx->features & UFFD_FEATURE_SIGBUS)
goto out;
- if ((vmf->flags & FAULT_FLAG_USER) == 0 &&
- ctx->flags & UFFD_USER_MODE_ONLY) {
- printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd "
- "sysctl knob to 1 if kernel faults must be handled "
- "without obtaining CAP_SYS_PTRACE capability\n");
+ if (!(vmf->flags & FAULT_FLAG_USER) && (ctx->flags & UFFD_USER_MODE_ONLY))
goto out;
- }
/*
* If it's already released don't get it. This avoids to loop
@@ -615,14 +611,16 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
if (release_new_ctx) {
struct vm_area_struct *vma;
struct mm_struct *mm = release_new_ctx->mm;
+ VMA_ITERATOR(vmi, mm, 0);
/* the various vma->vm_userfaultfd_ctx still points to it */
mmap_write_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next)
+ for_each_vma(vmi, vma) {
if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma->vm_flags &= ~__VM_UFFD_FLAGS;
}
+ }
mmap_write_unlock(mm);
userfaultfd_ctx_put(release_new_ctx);
@@ -803,11 +801,13 @@ static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
return false;
}
-int userfaultfd_unmap_prep(struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct list_head *unmaps)
+int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *unmaps)
{
- for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
+ VMA_ITERATOR(vmi, mm, start);
+ struct vm_area_struct *vma;
+
+ for_each_vma_range(vmi, vma, end) {
struct userfaultfd_unmap_ctx *unmap_ctx;
struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
@@ -857,6 +857,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
/* len == 0 means wake all */
struct userfaultfd_wake_range range = { .len = 0, };
unsigned long new_flags;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
WRITE_ONCE(ctx->released, true);
@@ -873,7 +874,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
*/
mmap_write_lock(mm);
prev = NULL;
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ mas_for_each(&mas, vma, ULONG_MAX) {
cond_resched();
BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
!!(vma->vm_flags & __VM_UFFD_FLAGS));
@@ -887,10 +888,13 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
NULL_VM_UFFD_CTX, anon_vma_name(vma));
- if (prev)
+ if (prev) {
+ mas_pause(&mas);
vma = prev;
- else
+ } else {
prev = vma;
+ }
+
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
@@ -1272,6 +1276,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
bool found;
bool basic_ioctls;
unsigned long start, end, vma_end;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
user_uffdio_register = (struct uffdio_register __user *) arg;
@@ -1314,7 +1319,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
goto out;
mmap_write_lock(mm);
- vma = find_vma_prev(mm, start, &prev);
+ mas_set(&mas, start);
+ vma = mas_find(&mas, ULONG_MAX);
if (!vma)
goto out_unlock;
@@ -1339,7 +1345,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
*/
found = false;
basic_ioctls = false;
- for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
+ for (cur = vma; cur; cur = mas_next(&mas, end - 1)) {
cond_resched();
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
@@ -1399,8 +1405,10 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
}
BUG_ON(!found);
- if (vma->vm_start < start)
- prev = vma;
+ mas_set(&mas, start);
+ prev = mas_prev(&mas, 0);
+ if (prev != vma)
+ mas_next(&mas, ULONG_MAX);
ret = 0;
do {
@@ -1430,6 +1438,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
((struct vm_userfaultfd_ctx){ ctx }),
anon_vma_name(vma));
if (prev) {
+ /* vma_merge() invalidated the mas */
+ mas_pause(&mas);
vma = prev;
goto next;
}
@@ -1437,11 +1447,15 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
ret = split_vma(mm, vma, start, 1);
if (ret)
break;
+ /* split_vma() invalidated the mas */
+ mas_pause(&mas);
}
if (vma->vm_end > end) {
ret = split_vma(mm, vma, end, 0);
if (ret)
break;
+ /* split_vma() invalidated the mas */
+ mas_pause(&mas);
}
next:
/*
@@ -1458,8 +1472,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
skip:
prev = vma;
start = vma->vm_end;
- vma = vma->vm_next;
- } while (vma && vma->vm_start < end);
+ vma = mas_next(&mas, end - 1);
+ } while (vma);
out_unlock:
mmap_write_unlock(mm);
mmput(mm);
@@ -1503,6 +1517,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
bool found;
unsigned long start, end, vma_end;
const void __user *buf = (void __user *)arg;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
ret = -EFAULT;
if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
@@ -1521,7 +1536,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
goto out;
mmap_write_lock(mm);
- vma = find_vma_prev(mm, start, &prev);
+ mas_set(&mas, start);
+ vma = mas_find(&mas, ULONG_MAX);
if (!vma)
goto out_unlock;
@@ -1546,7 +1562,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
*/
found = false;
ret = -EINVAL;
- for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
+ for (cur = vma; cur; cur = mas_next(&mas, end - 1)) {
cond_resched();
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
@@ -1566,8 +1582,10 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
}
BUG_ON(!found);
- if (vma->vm_start < start)
- prev = vma;
+ mas_set(&mas, start);
+ prev = mas_prev(&mas, 0);
+ if (prev != vma)
+ mas_next(&mas, ULONG_MAX);
ret = 0;
do {
@@ -1636,8 +1654,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
skip:
prev = vma;
start = vma->vm_end;
- vma = vma->vm_next;
- } while (vma && vma->vm_start < end);
+ vma = mas_next(&mas, end - 1);
+ } while (vma);
out_unlock:
mmap_write_unlock(mm);
mmput(mm);
@@ -2056,20 +2074,11 @@ static void init_once_userfaultfd_ctx(void *mem)
seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock);
}
-SYSCALL_DEFINE1(userfaultfd, int, flags)
+static int new_userfaultfd(int flags)
{
struct userfaultfd_ctx *ctx;
int fd;
- if (!sysctl_unprivileged_userfaultfd &&
- (flags & UFFD_USER_MODE_ONLY) == 0 &&
- !capable(CAP_SYS_PTRACE)) {
- printk_once(KERN_WARNING "uffd: Set unprivileged_userfaultfd "
- "sysctl knob to 1 if kernel faults must be handled "
- "without obtaining CAP_SYS_PTRACE capability\n");
- return -EPERM;
- }
-
BUG_ON(!current->mm);
/* Check the UFFD_* constants for consistency. */
@@ -2102,8 +2111,60 @@ SYSCALL_DEFINE1(userfaultfd, int, flags)
return fd;
}
+static inline bool userfaultfd_syscall_allowed(int flags)
+{
+ /* Userspace-only page faults are always allowed */
+ if (flags & UFFD_USER_MODE_ONLY)
+ return true;
+
+ /*
+ * The user is requesting a userfaultfd which can handle kernel faults.
+ * Privileged users are always allowed to do this.
+ */
+ if (capable(CAP_SYS_PTRACE))
+ return true;
+
+ /* Otherwise, access to kernel fault handling is sysctl controlled. */
+ return sysctl_unprivileged_userfaultfd;
+}
+
+SYSCALL_DEFINE1(userfaultfd, int, flags)
+{
+ if (!userfaultfd_syscall_allowed(flags))
+ return -EPERM;
+
+ return new_userfaultfd(flags);
+}
+
+static long userfaultfd_dev_ioctl(struct file *file, unsigned int cmd, unsigned long flags)
+{
+ if (cmd != USERFAULTFD_IOC_NEW)
+ return -EINVAL;
+
+ return new_userfaultfd(flags);
+}
+
+static const struct file_operations userfaultfd_dev_fops = {
+ .unlocked_ioctl = userfaultfd_dev_ioctl,
+ .compat_ioctl = userfaultfd_dev_ioctl,
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice userfaultfd_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "userfaultfd",
+ .fops = &userfaultfd_dev_fops
+};
+
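A userspace sketch of the new device-based path registered above (the device node name follows the misc device name; USERFAULTFD_IOC_NEW is assumed to come from the uapi userfaultfd header updated elsewhere in this series, and the helper name is illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Open a userfaultfd via /dev/userfaultfd instead of the syscall. */
static int example_open_uffd(int flags)
{
	int dev, uffd;

	dev = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
	if (dev < 0)
		return -1;

	/* Same flags as the userfaultfd() syscall, e.g. O_CLOEXEC. */
	uffd = ioctl(dev, USERFAULTFD_IOC_NEW, flags);
	close(dev);
	return uffd;
}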
static int __init userfaultfd_init(void)
{
+ int ret;
+
+ ret = misc_register(&userfaultfd_misc);
+ if (ret)
+ return ret;
+
userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
sizeof(struct userfaultfd_ctx),
0,
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 4f07afacbc23..f46258d1a080 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -2,6 +2,8 @@
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H
+#include <linux/instrumented.h>
+
struct mm_struct;
struct vm_area_struct;
struct page;
@@ -105,14 +107,22 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
+ instrument_copy_to_user((void __user *)dst, src, len); \
memcpy(dst, src, len); \
flush_icache_user_page(vma, page, vaddr, len); \
} while (0)
#endif
+
#ifndef copy_from_user_page
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ instrument_copy_from_user_before(dst, (void __user *)src, \
+ len); \
+ memcpy(dst, src, len); \
+ instrument_copy_from_user_after(dst, (void __user *)src, len, \
+ 0); \
+ } while (0)
#endif
#endif /* _ASM_GENERIC_CACHEFLUSH_H */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 06089390d81d..33fa5e94aa80 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -225,8 +225,6 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
-void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
- gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
@@ -236,7 +234,6 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
-void ll_rw_block(blk_opf_t, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
@@ -244,7 +241,9 @@ void submit_bh(blk_opf_t, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
-int bh_submit_read(struct buffer_head *bh);
+int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
+void __bh_read_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags, bool force_lock);
extern int buffer_heads_over_limit;
@@ -351,12 +350,6 @@ sb_breadahead(struct super_block *sb, sector_t block)
__breadahead(sb->s_bdev, block, sb->s_blocksize);
}
-static inline void
-sb_breadahead_unmovable(struct super_block *sb, sector_t block)
-{
- __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
-}
-
static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
@@ -418,6 +411,41 @@ static inline struct buffer_head *__getblk(struct block_device *bdev,
return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}
+static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
+ if (!buffer_uptodate(bh))
+ __bh_read(bh, op_flags, false);
+ else
+ unlock_buffer(bh);
+ }
+}
+
+static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (!bh_uptodate_or_lock(bh))
+ __bh_read(bh, op_flags, false);
+}
+
+/* Returns 1 if the buffer was already uptodate, 0 on read success, and -EIO on error. */
+static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
+{
+ if (bh_uptodate_or_lock(bh))
+ return 1;
+ return __bh_read(bh, op_flags, true);
+}
+
+static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
+{
+ __bh_read_batch(nr, bhs, 0, true);
+}
+
+static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
+ blk_opf_t op_flags)
+{
+ __bh_read_batch(nr, bhs, op_flags, false);
+}
+
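Taken together, bh_read() and bh_readahead_batch() support the breada-style pattern seen in the reiserfs and udf hunks below. A minimal sketch, assuming nr >= 1 and that the caller owns the buffer references (names illustrative):

static struct buffer_head *example_breada(struct buffer_head *bhs[], int nr)
{
	/* Start asynchronous readahead on everything after the first buffer. */
	bh_readahead_batch(nr - 1, &bhs[1], REQ_RAHEAD);

	/* Read the buffer we actually need and wait for it. */
	if (bh_read(bhs[0], 0) < 0)
		return NULL;
	return bhs[0];
}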
/**
* __bread() - reads a specified block and returns the bh
* @bdev: the block_device to read from
diff --git a/include/linux/cache.h b/include/linux/cache.h
index d742c57eaee5..5da1bbd96154 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -85,4 +85,17 @@
#define cache_line_size() L1_CACHE_BYTES
#endif
+/*
+ * Helper to add padding within a struct to ensure data falls into separate
+ * cachelines.
+ */
+#if defined(CONFIG_SMP)
+struct cacheline_padding {
+ char x[0];
+} ____cacheline_internodealigned_in_smp;
+#define CACHELINE_PADDING(name) struct cacheline_padding name
+#else
+#define CACHELINE_PADDING(name)
+#endif
+
#endif /* __LINUX_CACHE_H */
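A minimal sketch of the intended use of the new padding helper (the struct and field names are hypothetical):

struct example_ring {
	unsigned long producer_pos;
	CACHELINE_PADDING(_pad1_);	/* keep the two hot counters apart */
	unsigned long consumer_pos;
};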
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 398f0bce7c21..23b102b4349e 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -433,6 +433,18 @@ static inline void cgroup_put(struct cgroup *cgrp)
css_put(&cgrp->self);
}
+extern struct mutex cgroup_mutex;
+
+static inline void cgroup_lock(void)
+{
+ mutex_lock(&cgroup_mutex);
+}
+
+static inline void cgroup_unlock(void)
+{
+ mutex_unlock(&cgroup_mutex);
+}
+
/**
* task_css_set_check - obtain a task's css_set with extra access conditions
* @task: the task to obtain css_set for
@@ -447,7 +459,6 @@ static inline void cgroup_put(struct cgroup *cgrp)
* as locks used during the cgroup_subsys::attach() methods.
*/
#ifdef CONFIG_PROVE_RCU
-extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \
@@ -707,6 +718,8 @@ struct cgroup;
static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
+static inline void cgroup_lock(void) {}
+static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
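
A sketch of the new wrappers as seen from code outside the cgroup core, which
can now serialize against cgroup changes without referencing cgroup_mutex
directly; walk_cgroups() is a hypothetical caller:

static void walk_cgroups(void)
{
	cgroup_lock();
	/* ... traverse the cgroup hierarchy while it cannot change ... */
	cgroup_unlock();
}

With !CONFIG_CGROUPS the wrappers are the empty stubs added above, so such
callers need no additional ifdefs.
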
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 42e55579d649..6cfd6902bd5b 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -51,6 +51,29 @@
#define __no_sanitize_undefined
#endif
+#if __has_feature(memory_sanitizer)
+#define __SANITIZE_MEMORY__
+/*
+ * Unlike other sanitizers, KMSAN still inserts code into functions marked with
+ * no_sanitize("kernel-memory"). Using disable_sanitizer_instrumentation
+ * provides behavior consistent with other __no_sanitize_ attributes,
+ * guaranteeing that __no_sanitize_memory functions remain uninstrumented.
+ */
+#define __no_sanitize_memory __disable_sanitizer_instrumentation
+
+/*
+ * The __no_kmsan_checks attribute ensures that a function does not produce
+ * false positive reports by:
+ * - initializing all local variables and memory stores in this function;
+ * - skipping all shadow checks;
+ * - passing initialized arguments to this function's callees.
+ */
+#define __no_kmsan_checks __attribute__((no_sanitize("kernel-memory")))
+#else
+#define __no_sanitize_memory
+#define __no_kmsan_checks
+#endif
+
/*
* Support for __has_feature(coverage_sanitizer) was added in Clang 13 together
* with no_sanitize("coverage"). Prior versions of Clang support coverage
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 9b157b71036f..f55a37efdb97 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -115,6 +115,12 @@
#endif
/*
+ * GCC does not support KMSAN.
+ */
+#define __no_sanitize_memory
+#define __no_kmsan_checks
+
+/*
* Turn individual warnings and errors on and off locally, depending
* on version.
*/
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 74e04ecd4c89..eb0466236661 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -233,7 +233,8 @@ struct ftrace_likely_data {
/* Section for code which can't be instrumented at all */
#define noinstr \
noinline notrace __attribute((__section__(".noinstr.text"))) \
- __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage
+ __no_kcsan __no_sanitize_address __no_profile __no_sanitize_coverage \
+ __no_sanitize_memory
#endif /* __KERNEL__ */
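
A sketch of how the new annotation might be applied to a function that
consumes data KMSAN cannot see being initialized (for example, a table filled
in by firmware); parse_fw_table() and its argument are hypothetical:

static __no_kmsan_checks int parse_fw_table(const u8 *fw_table)
{
	/* Values derived from fw_table are treated as initialized here. */
	return fw_table[0] == 0x55 ? 0 : -EINVAL;
}

__no_sanitize_memory, by contrast, removes instrumentation entirely and is
reserved for code (such as noinstr sections above) that must not call into
the runtime at all.
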
diff --git a/include/linux/damon.h b/include/linux/damon.h
index 7b1f4a488230..ed5470f50bab 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -216,13 +216,26 @@ struct damos_stat {
};
/**
- * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
+ * struct damos_access_pattern - Target access pattern of the given scheme.
* @min_sz_region: Minimum size of target regions.
* @max_sz_region: Maximum size of target regions.
* @min_nr_accesses: Minimum ``->nr_accesses`` of target regions.
* @max_nr_accesses: Maximum ``->nr_accesses`` of target regions.
* @min_age_region: Minimum age of target regions.
* @max_age_region: Maximum age of target regions.
+ */
+struct damos_access_pattern {
+ unsigned long min_sz_region;
+ unsigned long max_sz_region;
+ unsigned int min_nr_accesses;
+ unsigned int max_nr_accesses;
+ unsigned int min_age_region;
+ unsigned int max_age_region;
+};
+
+/**
+ * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
+ * @pattern: Access pattern of target regions.
* @action: &damo_action to be applied to the target regions.
* @quota: Control the aggressiveness of this scheme.
* @wmarks: Watermarks for automated (in)activation of this scheme.
@@ -230,10 +243,8 @@ struct damos_stat {
* @list: List head for siblings.
*
* For each aggregation interval, DAMON finds regions which fit in the
- * condition (&min_sz_region, &max_sz_region, &min_nr_accesses,
- * &max_nr_accesses, &min_age_region, &max_age_region) and applies &action to
- * those. To avoid consuming too much CPU time or IO resources for the
- * &action, &quota is used.
+ * &pattern and applies &action to those. To avoid consuming too much
+ * CPU time or IO resources for the &action, &quota is used.
*
* To do the work only when needed, schemes can be activated for specific
* system situations using &wmarks. If all schemes that registered to the
@@ -248,12 +259,7 @@ struct damos_stat {
* &action is applied.
*/
struct damos {
- unsigned long min_sz_region;
- unsigned long max_sz_region;
- unsigned int min_nr_accesses;
- unsigned int max_nr_accesses;
- unsigned int min_age_region;
- unsigned int max_age_region;
+ struct damos_access_pattern pattern;
enum damos_action action;
struct damos_quota quota;
struct damos_watermarks wmarks;
@@ -340,7 +346,7 @@ struct damon_operations {
unsigned long (*apply_scheme)(struct damon_ctx *context,
struct damon_target *t, struct damon_region *r,
struct damos *scheme);
- bool (*target_valid)(void *target);
+ bool (*target_valid)(struct damon_target *t);
void (*cleanup)(struct damon_ctx *context);
};
@@ -383,13 +389,15 @@ struct damon_callback {
};
/**
- * struct damon_ctx - Represents a context for each monitoring. This is the
- * main interface that allows users to set the attributes and get the results
- * of the monitoring.
+ * struct damon_attrs - Monitoring attributes for accuracy/overhead control.
*
* @sample_interval: The time between access samplings.
* @aggr_interval: The time between monitor results aggregations.
* @ops_update_interval: The time between monitoring operations updates.
+ * @min_nr_regions: The minimum number of adaptive monitoring
+ * regions.
+ * @max_nr_regions: The maximum number of adaptive monitoring
+ * regions.
*
* For each @sample_interval, DAMON checks whether each region is accessed or
* not. It aggregates and keeps the access information (number of accesses to
@@ -399,7 +407,21 @@ struct damon_callback {
* @ops_update_interval. All time intervals are in micro-seconds.
* Please refer to &struct damon_operations and &struct damon_callback for more
* detail.
+ */
+struct damon_attrs {
+ unsigned long sample_interval;
+ unsigned long aggr_interval;
+ unsigned long ops_update_interval;
+ unsigned long min_nr_regions;
+ unsigned long max_nr_regions;
+};
+
+/**
+ * struct damon_ctx - Represents a context for each monitoring. This is the
+ * main interface that allows users to set the attributes and get the results
+ * of the monitoring.
*
+ * @attrs: Monitoring attributes for accuracy/overhead control.
* @kdamond: Kernel thread who does the monitoring.
* @kdamond_lock: Mutex for the synchronizations with @kdamond.
*
@@ -421,15 +443,11 @@ struct damon_callback {
* @ops: Set of monitoring operations for given use cases.
* @callback: Set of callbacks for monitoring events notifications.
*
- * @min_nr_regions: The minimum number of adaptive monitoring regions.
- * @max_nr_regions: The maximum number of adaptive monitoring regions.
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
* @schemes: Head of schemes (&damos) list.
*/
struct damon_ctx {
- unsigned long sample_interval;
- unsigned long aggr_interval;
- unsigned long ops_update_interval;
+ struct damon_attrs attrs;
/* private: internal use only */
struct timespec64 last_aggregation;
@@ -442,8 +460,6 @@ struct damon_ctx {
struct damon_operations ops;
struct damon_callback callback;
- unsigned long min_nr_regions;
- unsigned long max_nr_regions;
struct list_head adaptive_targets;
struct list_head schemes;
};
@@ -463,9 +479,17 @@ static inline struct damon_region *damon_last_region(struct damon_target *t)
return list_last_entry(&t->regions_list, struct damon_region, list);
}
+static inline struct damon_region *damon_first_region(struct damon_target *t)
+{
+ return list_first_entry(&t->regions_list, struct damon_region, list);
+}
+
#define damon_for_each_region(r, t) \
list_for_each_entry(r, &t->regions_list, list)
+#define damon_for_each_region_from(r, t) \
+ list_for_each_entry_from(r, &t->regions_list, list)
+
#define damon_for_each_region_safe(r, next, t) \
list_for_each_entry_safe(r, next, &t->regions_list, list)
@@ -501,12 +525,9 @@ void damon_destroy_region(struct damon_region *r, struct damon_target *t);
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
unsigned int nr_ranges);
-struct damos *damon_new_scheme(
- unsigned long min_sz_region, unsigned long max_sz_region,
- unsigned int min_nr_accesses, unsigned int max_nr_accesses,
- unsigned int min_age_region, unsigned int max_age_region,
- enum damos_action action, struct damos_quota *quota,
- struct damos_watermarks *wmarks);
+struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+ enum damos_action action, struct damos_quota *quota,
+ struct damos_watermarks *wmarks);
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
void damon_destroy_scheme(struct damos *s);
@@ -519,10 +540,8 @@ unsigned int damon_nr_regions(struct damon_target *t);
struct damon_ctx *damon_new_ctx(void);
void damon_destroy_ctx(struct damon_ctx *ctx);
-int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
- unsigned long aggr_int, unsigned long ops_upd_int,
- unsigned long min_nr_reg, unsigned long max_nr_reg);
-int damon_set_schemes(struct damon_ctx *ctx,
+int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs);
+void damon_set_schemes(struct damon_ctx *ctx,
struct damos **schemes, ssize_t nr_schemes);
int damon_nr_running_ctxs(void);
bool damon_is_registered_ops(enum damon_ops_id id);
@@ -538,6 +557,9 @@ static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+int damon_set_region_biggest_system_ram_default(struct damon_target *t,
+ unsigned long *start, unsigned long *end);
+
#endif /* CONFIG_DAMON */
#endif /* _DAMON_H */
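
A sketch of creating a DAMOS scheme with the struct-based interface introduced
above; make_pageout_scheme() and the numeric thresholds are illustrative only:

static struct damos *make_pageout_scheme(struct damos_quota *quota,
					 struct damos_watermarks *wmarks)
{
	struct damos_access_pattern pattern = {
		.min_sz_region = PAGE_SIZE,
		.max_sz_region = ULONG_MAX,
		.min_nr_accesses = 0,
		.max_nr_accesses = 0,	/* regions with no observed accesses */
		.min_age_region = 10,
		.max_age_region = UINT_MAX,
	};

	return damon_new_scheme(&pattern, DAMOS_PAGEOUT, quota, wmarks);
}

The scheme copies the pattern, so a stack-allocated struct is sufficient, and
damon_set_attrs() similarly takes a caller-filled struct damon_attrs instead
of five scalar arguments.
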
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index 58aea2d7385c..0da97dba9ef8 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -73,8 +73,8 @@ extern int delayacct_add_tsk(struct taskstats *, struct task_struct *);
extern __u64 __delayacct_blkio_ticks(struct task_struct *);
extern void __delayacct_freepages_start(void);
extern void __delayacct_freepages_end(void);
-extern void __delayacct_thrashing_start(void);
-extern void __delayacct_thrashing_end(void);
+extern void __delayacct_thrashing_start(bool *in_thrashing);
+extern void __delayacct_thrashing_end(bool *in_thrashing);
extern void __delayacct_swapin_start(void);
extern void __delayacct_swapin_end(void);
extern void __delayacct_compact_start(void);
@@ -143,22 +143,22 @@ static inline void delayacct_freepages_end(void)
__delayacct_freepages_end();
}
-static inline void delayacct_thrashing_start(void)
+static inline void delayacct_thrashing_start(bool *in_thrashing)
{
if (!static_branch_unlikely(&delayacct_key))
return;
if (current->delays)
- __delayacct_thrashing_start();
+ __delayacct_thrashing_start(in_thrashing);
}
-static inline void delayacct_thrashing_end(void)
+static inline void delayacct_thrashing_end(bool *in_thrashing)
{
if (!static_branch_unlikely(&delayacct_key))
return;
if (current->delays)
- __delayacct_thrashing_end();
+ __delayacct_thrashing_end(in_thrashing);
}
static inline void delayacct_swapin_start(void)
@@ -237,9 +237,9 @@ static inline void delayacct_freepages_start(void)
{}
static inline void delayacct_freepages_end(void)
{}
-static inline void delayacct_thrashing_start(void)
+static inline void delayacct_thrashing_start(bool *in_thrashing)
{}
-static inline void delayacct_thrashing_end(void)
+static inline void delayacct_thrashing_end(bool *in_thrashing)
{}
static inline void delayacct_swapin_start(void)
{}
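
The new prototypes thread a caller-provided flag through the start/end pair,
so the accounting code can tell whether the task was already inside a
thrashing section. A minimal sketch of the caller pattern, where
wait_for_page() is a hypothetical wrapper around a real wait site:

static void wait_for_page(struct folio *folio)
{
	bool in_thrashing;

	delayacct_thrashing_start(&in_thrashing);
	folio_wait_locked(folio);
	delayacct_thrashing_end(&in_thrashing);
}
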
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index b62c90cfafaf..4029fe368a4f 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -328,8 +328,10 @@ __FORTIFY_INLINE void fortify_memset_chk(__kernel_size_t size,
* __struct_size() vs __member_size() must be captured here to avoid
* evaluating argument side-effects further into the macro layers.
*/
+#ifndef CONFIG_KMSAN
#define memset(p, c, s) __fortify_memset_chk(p, c, s, \
__struct_size(p), __member_size(p))
+#endif
/*
* To make sure the compiler can enforce protection against buffer overflows,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index f314be58fa77..ef4aea3b356e 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -18,6 +18,9 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
+ BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
+ BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
+ GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;
@@ -33,29 +36,6 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}
-/**
- * gfpflags_normal_context - is gfp_flags a normal sleepable context?
- * @gfp_flags: gfp_flags to test
- *
- * Test whether @gfp_flags indicates that the allocation is from the
- * %current context and allowed to sleep.
- *
- * An allocation being allowed to block doesn't mean it owns the %current
- * context. When direct reclaim path tries to allocate memory, the
- * allocation context is nested inside whatever %current was doing at the
- * time of the original allocation. The nested allocation may be allowed
- * to block but modifying anything %current owns can corrupt the outer
- * context's expectations.
- *
- * %true result from this function indicates that the allocation context
- * can sleep and use anything that's associated with %current.
- */
-static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
-{
- return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
- __GFP_DIRECT_RECLAIM;
-}
-
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 25679035ca28..e9912da5441b 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/cacheflush.h>
+#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
@@ -311,6 +312,7 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
vfrom = kmap_local_page(from);
vto = kmap_local_page(to);
copy_user_page(vto, vfrom, vaddr, to);
+ kmsan_unpoison_memory(page_address(to), PAGE_SIZE);
kunmap_local(vto);
kunmap_local(vfrom);
}
@@ -326,6 +328,7 @@ static inline void copy_highpage(struct page *to, struct page *from)
vfrom = kmap_local_page(from);
vto = kmap_local_page(to);
copy_page(vto, vfrom);
+ kmsan_copy_page_meta(to, from);
kunmap_local(vto);
kunmap_local(vfrom);
}
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 768e5261fdae..a1341fdcf666 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -168,9 +168,8 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
!inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}
-bool hugepage_vma_check(struct vm_area_struct *vma,
- unsigned long vm_flags,
- bool smaps, bool in_pf);
+bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
+ bool smaps, bool in_pf, bool enforce_sysfs);
#define transparent_hugepage_use_zero_page() \
(transparent_hugepage_flags & \
@@ -219,6 +218,9 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
int advice);
+int madvise_collapse(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
@@ -321,8 +323,8 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
}
static inline bool hugepage_vma_check(struct vm_area_struct *vma,
- unsigned long vm_flags,
- bool smaps, bool in_pf)
+ unsigned long vm_flags, bool smaps,
+ bool in_pf, bool enforce_sysfs)
{
return false;
}
@@ -362,9 +364,16 @@ static inline void split_huge_pmd_address(struct vm_area_struct *vma,
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
- BUG();
- return 0;
+ return -EINVAL;
+}
+
+static inline int madvise_collapse(struct vm_area_struct *vma,
+ struct vm_area_struct **prev,
+ unsigned long start, unsigned long end)
+{
+ return -EINVAL;
}
+
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
@@ -435,6 +444,11 @@ static inline int split_folio_to_list(struct folio *folio,
return split_huge_page_to_list(&folio->page, list);
}
+static inline int split_folio(struct folio *folio)
+{
+ return split_folio_to_list(folio, NULL);
+}
+
/*
* archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to
* limitations in the implementation like arm64 MTE can override this to
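
The new split_folio() wrapper simply forwards to split_huge_page_to_list()
with a NULL list. A minimal caller sketch follows; make_folio_small() is
hypothetical, and the folio lock is held across the split as
split_huge_page() requires:

static int make_folio_small(struct folio *folio)
{
	int ret;

	if (!folio_test_large(folio))
		return 0;

	folio_lock(folio);
	ret = split_folio(folio);
	folio_unlock(folio);
	return ret;
}
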
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 1ec1535be04f..95fda85aa195 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -16,6 +16,7 @@
struct ctl_table;
struct user_struct;
struct mmu_gather;
+struct node;
#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
@@ -114,6 +115,12 @@ struct file_region {
#endif
};
+struct hugetlb_vma_lock {
+ struct kref refs;
+ struct rw_semaphore rw_sema;
+ struct vm_area_struct *vma;
+};
+
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
@@ -126,7 +133,7 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
-void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
+void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
@@ -214,6 +221,14 @@ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
pgd_t *pgd, int flags);
+void hugetlb_vma_lock_read(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
+void hugetlb_vma_lock_write(struct vm_area_struct *vma);
+void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
+int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
+void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
+void hugetlb_vma_lock_release(struct kref *kref);
+
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
@@ -225,7 +240,7 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */
-static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
+static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}
@@ -336,6 +351,31 @@ static inline int prepare_hugepage_range(struct file *file,
return -EINVAL;
}
+static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+{
+}
+
+static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+{
+ return 1;
+}
+
+static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+{
+}
+
static inline int pmd_huge(pmd_t pmd)
{
return 0;
@@ -665,7 +705,7 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
unsigned long address);
-int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
unsigned long address, struct page *page);
@@ -935,6 +975,11 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
}
#endif
+#ifdef CONFIG_NUMA
+void hugetlb_register_node(struct node *node);
+void hugetlb_unregister_node(struct node *node);
+#endif
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
@@ -1109,6 +1154,14 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
}
+
+static inline void hugetlb_register_node(struct node *node)
+{
+}
+
+static inline void hugetlb_unregister_node(struct node *node)
+{
+}
#endif /* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
@@ -1123,14 +1176,10 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
-extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
-static inline __init void hugetlb_cma_check(void)
-{
-}
#endif
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
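
A sketch of the new per-VMA lock helpers, which let a page-table walker
serialize against huge PMD sharing and unsharing; walk_hugetlb_ptes() is a
hypothetical caller:

static void walk_hugetlb_ptes(struct vm_area_struct *vma)
{
	hugetlb_vma_lock_read(vma);
	/* ... walk the page tables; shared PMDs cannot be unshared here ... */
	hugetlb_vma_unlock_read(vma);
}

For !CONFIG_HUGETLB_PAGE builds the stubs above make these calls no-ops, and
hugetlb_vma_trylock_write() always reports success.
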
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 379344828e78..630cd255d0cf 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -90,32 +90,31 @@ hugetlb_cgroup_from_page_rsvd(struct page *page)
return __hugetlb_cgroup_from_page(page, true);
}
-static inline int __set_hugetlb_cgroup(struct page *page,
+static inline void __set_hugetlb_cgroup(struct page *page,
struct hugetlb_cgroup *h_cg, bool rsvd)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
- return -1;
+ return;
if (rsvd)
set_page_private(page + SUBPAGE_INDEX_CGROUP_RSVD,
(unsigned long)h_cg);
else
set_page_private(page + SUBPAGE_INDEX_CGROUP,
(unsigned long)h_cg);
- return 0;
}
-static inline int set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct page *page,
struct hugetlb_cgroup *h_cg)
{
- return __set_hugetlb_cgroup(page, h_cg, false);
+ __set_hugetlb_cgroup(page, h_cg, false);
}
-static inline int set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct page *page,
struct hugetlb_cgroup *h_cg)
{
- return __set_hugetlb_cgroup(page, h_cg, true);
+ __set_hugetlb_cgroup(page, h_cg, true);
}
static inline bool hugetlb_cgroup_disabled(void)
@@ -199,16 +198,14 @@ hugetlb_cgroup_from_page_rsvd(struct page *page)
return NULL;
}
-static inline int set_hugetlb_cgroup(struct page *page,
+static inline void set_hugetlb_cgroup(struct page *page,
struct hugetlb_cgroup *h_cg)
{
- return 0;
}
-static inline int set_hugetlb_cgroup_rsvd(struct page *page,
+static inline void set_hugetlb_cgroup_rsvd(struct page *page,
struct hugetlb_cgroup *h_cg)
{
- return 0;
}
static inline bool hugetlb_cgroup_disabled(void)
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
index 42faebbaa202..501fa8486749 100644
--- a/include/linux/instrumented.h
+++ b/include/linux/instrumented.h
@@ -2,7 +2,7 @@
/*
* This header provides generic wrappers for memory access instrumentation that
- * the compiler cannot emit for: KASAN, KCSAN.
+ * the compiler cannot emit for: KASAN, KCSAN, KMSAN.
*/
#ifndef _LINUX_INSTRUMENTED_H
#define _LINUX_INSTRUMENTED_H
@@ -10,6 +10,7 @@
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>
+#include <linux/kmsan-checks.h>
#include <linux/types.h>
/**
@@ -117,10 +118,11 @@ instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
{
kasan_check_read(from, n);
kcsan_check_read(from, n);
+ kmsan_copy_to_user(to, from, n, 0);
}
/**
- * instrument_copy_from_user - instrument writes of copy_from_user
+ * instrument_copy_from_user_before - add instrumentation before copy_from_user
*
* Instrument writes to kernel memory, that are due to copy_from_user (and
* variants). The instrumentation should be inserted before the accesses.
@@ -130,10 +132,61 @@ instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
* @n number of bytes to copy
*/
static __always_inline void
-instrument_copy_from_user(const void *to, const void __user *from, unsigned long n)
+instrument_copy_from_user_before(const void *to, const void __user *from, unsigned long n)
{
kasan_check_write(to, n);
kcsan_check_write(to, n);
}
+/**
+ * instrument_copy_from_user_after - add instrumentation after copy_from_user
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted after the accesses.
+ *
+ * @to destination address
+ * @from source address
+ * @n number of bytes to copy
+ * @left number of bytes not copied (as returned by copy_from_user)
+ */
+static __always_inline void
+instrument_copy_from_user_after(const void *to, const void __user *from,
+ unsigned long n, unsigned long left)
+{
+ kmsan_unpoison_memory(to, n - left);
+}
+
+/**
+ * instrument_get_user() - add instrumentation to get_user()-like macros
+ *
+ * get_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied from
+ * userspace.
+ *
+ * @to destination variable, may not be address-taken
+ */
+#define instrument_get_user(to) \
+({ \
+ u64 __tmp = (u64)(to); \
+ kmsan_unpoison_memory(&__tmp, sizeof(__tmp)); \
+ to = __tmp; \
+})
+
+/**
+ * instrument_put_user() - add instrumentation to put_user()-like macros
+ *
+ * put_user() and friends are fragile, so it may depend on the implementation
+ * whether the instrumentation happens before or after the data is copied to
+ * userspace.
+ *
+ * @from source address
+ * @ptr userspace pointer to copy to
+ * @size number of bytes to copy
+ */
+#define instrument_put_user(from, ptr, size) \
+({ \
+ kmsan_copy_to_user(ptr, &from, sizeof(from), 0); \
+})
+
#endif /* _LINUX_INSTRUMENTED_H */
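
The split before/after hooks are meant to bracket the raw user copy so that
KMSAN can unpoison only the bytes that were actually copied. A simplified
sketch using the generic raw_copy_from_user() primitive (access_ok() checks
and zeroing of the uncopied tail are omitted here):

static inline unsigned long
copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
{
	unsigned long left;

	instrument_copy_from_user_before(to, from, n);
	left = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, left);
	return left;	/* number of bytes that could not be copied */
}
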
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b092277bf48d..d811b3d7d2a1 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -98,19 +98,13 @@ static inline bool kasan_has_integrated_init(void)
#ifdef CONFIG_KASAN
struct kasan_cache {
+#ifdef CONFIG_KASAN_GENERIC
int alloc_meta_offset;
int free_meta_offset;
+#endif
bool is_kmalloc;
};
-slab_flags_t __kasan_never_merge(void);
-static __always_inline slab_flags_t kasan_never_merge(void)
-{
- if (kasan_enabled())
- return __kasan_never_merge();
- return 0;
-}
-
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
@@ -134,15 +128,6 @@ static __always_inline void kasan_unpoison_pages(struct page *page,
__kasan_unpoison_pages(page, order, init);
}
-void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
- slab_flags_t *flags);
-static __always_inline void kasan_cache_create(struct kmem_cache *cache,
- unsigned int *size, slab_flags_t *flags)
-{
- if (kasan_enabled())
- __kasan_cache_create(cache, size, flags);
-}
-
void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
@@ -150,14 +135,6 @@ static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
__kasan_cache_create_kmalloc(cache);
}
-size_t __kasan_metadata_size(struct kmem_cache *cache);
-static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
-{
- if (kasan_enabled())
- return __kasan_metadata_size(cache);
- return 0;
-}
-
void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
@@ -269,20 +246,12 @@ static __always_inline bool kasan_check_byte(const void *addr)
#else /* CONFIG_KASAN */
-static inline slab_flags_t kasan_never_merge(void)
-{
- return 0;
-}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
bool init) {}
-static inline void kasan_cache_create(struct kmem_cache *cache,
- unsigned int *size,
- slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
-static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
void *object) {}
@@ -333,6 +302,11 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#ifdef CONFIG_KASAN_GENERIC
+size_t kasan_metadata_size(struct kmem_cache *cache);
+slab_flags_t kasan_never_merge(void);
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+ slab_flags_t *flags);
+
void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
@@ -340,6 +314,21 @@ void kasan_record_aux_stack_noalloc(void *ptr);
#else /* CONFIG_KASAN_GENERIC */
+/* Tag-based KASAN modes do not use per-object metadata. */
+static inline size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+ return 0;
+}
+/* And thus nothing prevents cache merging. */
+static inline slab_flags_t kasan_never_merge(void)
+{
+ return 0;
+}
+/* And no cache-related metadata initialization is required. */
+static inline void kasan_cache_create(struct kmem_cache *cache,
+ unsigned int *size,
+ slab_flags_t *flags) {}
+
static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 384f034ae947..70162d707caf 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -16,11 +16,13 @@ extern void khugepaged_enter_vma(struct vm_area_struct *vma,
unsigned long vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
#ifdef CONFIG_SHMEM
-extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
+extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ bool install_pmd);
#else
-static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
+static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
+ return 0;
}
#endif
@@ -46,9 +48,10 @@ static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
}
-static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
- unsigned long addr)
+static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr, bool install_pmd)
{
+ return 0;
}
static inline void khugepaged_min_free_kbytes_update(void)
diff --git a/include/linux/kmsan-checks.h b/include/linux/kmsan-checks.h
new file mode 100644
index 000000000000..c4cae333deec
--- /dev/null
+++ b/include/linux/kmsan-checks.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN checks to be used for one-off annotations in subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#ifndef _LINUX_KMSAN_CHECKS_H
+#define _LINUX_KMSAN_CHECKS_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_poison_memory() - Mark the memory range as uninitialized.
+ * @address: address to start with.
+ * @size: size of buffer to poison.
+ * @flags: GFP flags for allocations done by this function.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * uninitialized. Error reports for this memory will reference the call site of
+ * kmsan_poison_memory() as origin.
+ */
+void kmsan_poison_memory(const void *address, size_t size, gfp_t flags);
+
+/**
+ * kmsan_unpoison_memory() - Mark the memory range as initialized.
+ * @address: address to start with.
+ * @size: size of buffer to unpoison.
+ *
+ * Until other data is written to this range, KMSAN will treat it as
+ * initialized.
+ */
+void kmsan_unpoison_memory(const void *address, size_t size);
+
+/**
+ * kmsan_check_memory() - Check the memory range for being initialized.
+ * @address: address to start with.
+ * @size: size of buffer to check.
+ *
+ * If any piece of the given range is marked as uninitialized, KMSAN will report
+ * an error.
+ */
+void kmsan_check_memory(const void *address, size_t size);
+
+/**
+ * kmsan_copy_to_user() - Notify KMSAN about a data transfer to userspace.
+ * @to: destination address in the userspace.
+ * @from: source address in the kernel.
+ * @to_copy: number of bytes to copy.
+ * @left: number of bytes not copied.
+ *
+ * If this is a real userspace data transfer, KMSAN checks the bytes that were
+ * actually copied to ensure there was no information leak. If @to belongs to
+ * the kernel space (which is possible for compat syscalls), KMSAN just copies
+ * the metadata.
+ */
+void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
+ size_t left);
+
+#else
+
+static inline void kmsan_poison_memory(const void *address, size_t size,
+ gfp_t flags)
+{
+}
+static inline void kmsan_unpoison_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_check_memory(const void *address, size_t size)
+{
+}
+static inline void kmsan_copy_to_user(void __user *to, const void *from,
+ size_t to_copy, size_t left)
+{
+}
+
+#endif
+
+#endif /* _LINUX_KMSAN_CHECKS_H */
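
As a usage illustration of these one-off annotations, a driver that receives
data KMSAN cannot track might look like the sketch below; fill_from_device()
and send_to_device() are hypothetical:

static void process_buffer(void *buf, size_t len)
{
	fill_from_device(buf, len);
	/* KMSAN cannot observe the device write, so mark the range initialized. */
	kmsan_unpoison_memory(buf, len);

	/* Report immediately if any byte of buf is still uninitialized. */
	kmsan_check_memory(buf, len);
	send_to_device(buf, len);
}
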
diff --git a/include/linux/kmsan.h b/include/linux/kmsan.h
new file mode 100644
index 000000000000..e38ae3c34618
--- /dev/null
+++ b/include/linux/kmsan.h
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KMSAN API for subsystems.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_H
+#define _LINUX_KMSAN_H
+
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/kmsan-checks.h>
+#include <linux/types.h>
+
+struct page;
+struct kmem_cache;
+struct task_struct;
+struct scatterlist;
+struct urb;
+
+#ifdef CONFIG_KMSAN
+
+/**
+ * kmsan_task_create() - Initialize KMSAN state for the task.
+ * @task: task to initialize.
+ */
+void kmsan_task_create(struct task_struct *task);
+
+/**
+ * kmsan_task_exit() - Notify KMSAN that a task has exited.
+ * @task: task about to finish.
+ */
+void kmsan_task_exit(struct task_struct *task);
+
+/**
+ * kmsan_init_shadow() - Initialize KMSAN shadow at boot time.
+ *
+ * Allocate and initialize KMSAN metadata for early allocations.
+ */
+void __init kmsan_init_shadow(void);
+
+/**
+ * kmsan_init_runtime() - Initialize KMSAN state and enable KMSAN.
+ */
+void __init kmsan_init_runtime(void);
+
+/**
+ * kmsan_memblock_free_pages() - handle freeing of memblock pages.
+ * @page: struct page to free.
+ * @order: order of @page.
+ *
+ * Freed pages are either returned to buddy allocator or held back to be used
+ * as metadata pages.
+ */
+bool __init kmsan_memblock_free_pages(struct page *page, unsigned int order);
+
+/**
+ * kmsan_alloc_page() - Notify KMSAN about an alloc_pages() call.
+ * @page: struct page pointer returned by alloc_pages().
+ * @order: order of allocated struct page.
+ * @flags: GFP flags used by alloc_pages()
+ *
+ * KMSAN marks 1<<@order pages starting at @page as uninitialized, unless
+ * @flags contain __GFP_ZERO.
+ */
+void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
+
+/**
+ * kmsan_free_page() - Notify KMSAN about a free_pages() call.
+ * @page: struct page pointer passed to free_pages().
+ * @order: order of deallocated struct page.
+ *
+ * KMSAN marks freed memory as uninitialized.
+ */
+void kmsan_free_page(struct page *page, unsigned int order);
+
+/**
+ * kmsan_copy_page_meta() - Copy KMSAN metadata between two pages.
+ * @dst: destination page.
+ * @src: source page.
+ *
+ * KMSAN copies the contents of metadata pages for @src into the metadata pages
+ * for @dst. If @dst has no associated metadata pages, nothing happens.
+ * If @src has no associated metadata pages, @dst metadata pages are unpoisoned.
+ */
+void kmsan_copy_page_meta(struct page *dst, struct page *src);
+
+/**
+ * kmsan_slab_alloc() - Notify KMSAN about a slab allocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Depending on cache flags and GFP flags, KMSAN sets up the metadata of the
+ * newly created object, marking it as initialized or uninitialized.
+ */
+void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
+
+/**
+ * kmsan_slab_free() - Notify KMSAN about a slab deallocation.
+ * @s: slab cache the object belongs to.
+ * @object: object pointer.
+ *
+ * KMSAN marks the freed object as uninitialized.
+ */
+void kmsan_slab_free(struct kmem_cache *s, void *object);
+
+/**
+ * kmsan_kmalloc_large() - Notify KMSAN about a large slab allocation.
+ * @ptr: object pointer.
+ * @size: object size.
+ * @flags: GFP flags passed to the allocator.
+ *
+ * Similar to kmsan_slab_alloc(), but for large allocations.
+ */
+void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
+
+/**
+ * kmsan_kfree_large() - Notify KMSAN about a large slab deallocation.
+ * @ptr: object pointer.
+ *
+ * Similar to kmsan_slab_free(), but for large allocations.
+ */
+void kmsan_kfree_large(const void *ptr);
+
+/**
+ * kmsan_map_kernel_range_noflush() - Notify KMSAN about a vmap.
+ * @start: start of vmapped range.
+ * @end: end of vmapped range.
+ * @prot: page protection flags used for vmap.
+ * @pages: array of pages.
+ * @page_shift: page_shift passed to vmap_range_noflush().
+ *
+ * KMSAN maps shadow and origin pages of @pages into contiguous ranges in
+ * vmalloc metadata address range.
+ */
+void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift);
+
+/**
+ * kmsan_vunmap_kernel_range_noflush() - Notify KMSAN about a vunmap.
+ * @start: start of vunmapped range.
+ * @end: end of vunmapped range.
+ *
+ * KMSAN unmaps the contiguous metadata ranges created by
+ * kmsan_map_kernel_range_noflush().
+ */
+void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_ioremap_page_range() - Notify KMSAN about a ioremap_page_range() call.
+ * @addr: range start.
+ * @end: range end.
+ * @phys_addr: physical range start.
+ * @prot: page protection flags used for ioremap_page_range().
+ * @page_shift: page_shift argument passed to vmap_range_noflush().
+ *
+ * KMSAN creates new metadata pages for the physical pages mapped into the
+ * virtual memory.
+ */
+void kmsan_ioremap_page_range(unsigned long addr, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift);
+
+/**
+ * kmsan_iounmap_page_range() - Notify KMSAN about a iounmap_page_range() call.
+ * @start: range start.
+ * @end: range end.
+ *
+ * KMSAN unmaps the metadata pages for the given range and, unlike for
+ * vunmap_page_range(), also deallocates them.
+ */
+void kmsan_iounmap_page_range(unsigned long start, unsigned long end);
+
+/**
+ * kmsan_handle_dma() - Handle a DMA data transfer.
+ * @page: first page of the buffer.
+ * @offset: offset of the buffer within the first page.
+ * @size: buffer size.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @dir, KMSAN:
+ * * checks the buffer, if it is copied to device;
+ * * initializes the buffer, if it is copied from device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_dma_sg() - Handle a DMA transfer using scatterlist.
+ * @sg: scatterlist holding DMA buffers.
+ * @nents: number of scatterlist entries.
+ * @dir: one of possible dma_data_direction values.
+ *
+ * Depending on @dir, KMSAN:
+ * * checks the buffers in the scatterlist, if they are copied to device;
+ * * initializes the buffers, if they are copied from device;
+ * * does both, if this is a DMA_BIDIRECTIONAL transfer.
+ */
+void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+
+/**
+ * kmsan_handle_urb() - Handle a USB data transfer.
+ * @urb: struct urb pointer.
+ * @is_out: data transfer direction (true means output to hardware).
+ *
+ * If @is_out is true, KMSAN checks the transfer buffer of @urb. Otherwise,
+ * KMSAN initializes the transfer buffer.
+ */
+void kmsan_handle_urb(const struct urb *urb, bool is_out);
+
+/**
+ * kmsan_unpoison_entry_regs() - Handle pt_regs in low-level entry code.
+ * @regs: struct pt_regs pointer received from assembly code.
+ *
+ * KMSAN unpoisons the contents of the passed pt_regs, preventing potential
+ * false positive reports. Unlike kmsan_unpoison_memory(),
+ * kmsan_unpoison_entry_regs() can be called from the regions where
+ * kmsan_in_runtime() returns true, which is the case in early entry code.
+ */
+void kmsan_unpoison_entry_regs(const struct pt_regs *regs);
+
+#else
+
+static inline void kmsan_init_shadow(void)
+{
+}
+
+static inline void kmsan_init_runtime(void)
+{
+}
+
+static inline bool kmsan_memblock_free_pages(struct page *page,
+ unsigned int order)
+{
+ return true;
+}
+
+static inline void kmsan_task_create(struct task_struct *task)
+{
+}
+
+static inline void kmsan_task_exit(struct task_struct *task)
+{
+}
+
+static inline void kmsan_alloc_page(struct page *page, unsigned int order,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_free_page(struct page *page, unsigned int order)
+{
+}
+
+static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
+{
+}
+
+static inline void kmsan_slab_alloc(struct kmem_cache *s, void *object,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_slab_free(struct kmem_cache *s, void *object)
+{
+}
+
+static inline void kmsan_kmalloc_large(const void *ptr, size_t size,
+ gfp_t flags)
+{
+}
+
+static inline void kmsan_kfree_large(const void *ptr)
+{
+}
+
+static inline void kmsan_vmap_pages_range_noflush(unsigned long start,
+ unsigned long end,
+ pgprot_t prot,
+ struct page **pages,
+ unsigned int page_shift)
+{
+}
+
+static inline void kmsan_vunmap_range_noflush(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void kmsan_ioremap_page_range(unsigned long start,
+ unsigned long end,
+ phys_addr_t phys_addr,
+ pgprot_t prot,
+ unsigned int page_shift)
+{
+}
+
+static inline void kmsan_iounmap_page_range(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void kmsan_handle_dma(struct page *page, size_t offset,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+}
+
+static inline void kmsan_handle_urb(const struct urb *urb, bool is_out)
+{
+}
+
+static inline void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
+{
+}
+
+#endif
+
+#endif /* _LINUX_KMSAN_H */
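
A sketch of where the DMA mapping core is expected to invoke
kmsan_handle_dma(); map_for_device() is a hypothetical, simplified stand-in
for the real dma_map_page_attrs() path:

static dma_addr_t map_for_device(struct device *dev, struct page *page,
				 size_t offset, size_t size,
				 enum dma_data_direction dir)
{
	/* Checks the buffer for to-device transfers, unpoisons it for from-device. */
	kmsan_handle_dma(page, offset, size, dir);
	return dma_map_page(dev, page, offset, size, dir);
}
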
diff --git a/include/linux/kmsan_types.h b/include/linux/kmsan_types.h
new file mode 100644
index 000000000000..8bfa6c98176d
--- /dev/null
+++ b/include/linux/kmsan_types.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * A minimal header declaring types added by KMSAN to existing kernel structs.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+#ifndef _LINUX_KMSAN_TYPES_H
+#define _LINUX_KMSAN_TYPES_H
+
+/* These constants are defined in the MSan LLVM instrumentation pass. */
+#define KMSAN_RETVAL_SIZE 800
+#define KMSAN_PARAM_SIZE 800
+
+struct kmsan_context_state {
+ char param_tls[KMSAN_PARAM_SIZE];
+ char retval_tls[KMSAN_RETVAL_SIZE];
+ char va_arg_tls[KMSAN_PARAM_SIZE];
+ char va_arg_origin_tls[KMSAN_PARAM_SIZE];
+ u64 va_arg_overflow_size_tls;
+ char param_origin_tls[KMSAN_PARAM_SIZE];
+ u32 retval_origin_tls;
+};
+
+#undef KMSAN_PARAM_SIZE
+#undef KMSAN_RETVAL_SIZE
+
+struct kmsan_ctx {
+ struct kmsan_context_state cstate;
+ int kmsan_in_runtime;
+ bool allow_reporting;
+};
+
+#endif /* _LINUX_KMSAN_TYPES_H */
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 0b4f17418f64..7e232ba59b86 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -15,9 +15,6 @@
#include <linux/sched.h>
#include <linux/sched/coredump.h>
-struct stable_node;
-struct mem_cgroup;
-
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags);
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
new file mode 100644
index 000000000000..2effab72add1
--- /dev/null
+++ b/include/linux/maple_tree.h
@@ -0,0 +1,685 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _LINUX_MAPLE_TREE_H
+#define _LINUX_MAPLE_TREE_H
+/*
+ * Maple Tree - An RCU-safe adaptive tree for storing ranges
+ * Copyright (c) 2018-2022 Oracle
+ * Authors: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ * Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+/* #define CONFIG_MAPLE_RCU_DISABLED */
+/* #define CONFIG_DEBUG_MAPLE_TREE_VERBOSE */
+
+/*
+ * Allocated nodes are mutable until they have been inserted into the tree,
+ * at which time they cannot change their type until they have been removed
+ * from the tree and an RCU grace period has passed.
+ *
+ * Removed nodes have their ->parent set to point to themselves. RCU readers
+ * check ->parent before relying on the value that they loaded from the
+ * slots array. This lets us reuse the slots array for the RCU head.
+ *
+ * Nodes in the tree point to their parent unless bit 0 is set.
+ */
+#if defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64)
+/* 64bit sizes */
+#define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */
+#define MAPLE_ARANGE64_META_MAX 15 /* Out of range for metadata */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 1)
+#else
+/* 32bit sizes */
+#define MAPLE_NODE_SLOTS 63 /* 256 bytes including ->parent */
+#define MAPLE_RANGE64_SLOTS 32 /* 256 bytes */
+#define MAPLE_ARANGE64_SLOTS 21 /* 240 bytes */
+#define MAPLE_ARANGE64_META_MAX 31 /* Out of range for metadata */
+#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 2)
+#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */
+
+#define MAPLE_NODE_MASK 255UL
+
+/*
+ * The node->parent of the root node has bit 0 set and the rest of the pointer
+ * is a pointer to the tree itself. No more bits are available in this pointer
+ * (on m68k, the data structure may only be 2-byte aligned).
+ *
+ * Internal non-root nodes can only have maple_range_* nodes as parents. The
+ * parent pointer is 256B aligned like all other tree nodes. When storing 32 or
+ * 64 bit values, the offset can fit into 4 bits. The 16 bit values need an
+ * extra bit to store the offset. This extra bit comes from a reuse of the last
+ * bit in the node type. This is possible by using bit 1 to indicate if bit 2
+ * is part of the type or the slot.
+ *
+ * Once the type is decided, whether the node is an allocation range node or a
+ * plain range node is determined by checking the immutable tree flags for the
+ * MAPLE_ALLOC_RANGE flag.
+ *
+ * Node types:
+ * 0x??1 = Root
+ * 0x?00 = 16 bit nodes
+ * 0x010 = 32 bit nodes
+ * 0x110 = 64 bit nodes
+ *
+ * Slot size and location in the parent pointer:
+ * type : slot location
+ * 0x??1 : Root
+ * 0x?00 : 16 bit values, type in 0-1, slot in 2-6
+ * 0x010 : 32 bit values, type in 0-2, slot in 3-6
+ * 0x110 : 64 bit values, type in 0-2, slot in 3-6
+ */
+
+/*
+ * This metadata is used to optimize the gap updating code and in reverse
+ * searching for gaps or any other code that needs to find the end of the data.
+ */
+struct maple_metadata {
+ unsigned char end;
+ unsigned char gap;
+};
+
+/*
+ * Leaf nodes do not store pointers to nodes, they store user data. Users may
+ * store almost any bit pattern. As noted above, the optimisation of storing an
+ * entry at 0 in the root pointer cannot be done for data which have the bottom
+ * two bits set to '10'. We also reserve values with the bottom two bits set to
+ * '10' which are below 4096 (ie 2, 6, 10 .. 4094) for internal use. Some APIs
+ * return errnos as a negative errno shifted right by two bits and the bottom
+ * two bits set to '10', and while choosing to store these values in the array
+ * is not an error, it may lead to confusion if you're testing for an error with
+ * mas_is_err().
+ *
+ * Non-leaf nodes store the type of the node pointed to (enum maple_type in bits
+ * 3-6), bit 2 is reserved. That leaves bits 0-1 unused for now.
+ *
+ * In regular B-Tree terms, pivots are called keys. The term pivot is used to
+ * indicate that the tree is specifying ranges. Pivots may appear in the
+ * subtree with an entry attached to the value, whereas keys are unique to a
+ * specific position of a B-tree. Pivot values are inclusive of the slot with
+ * the same index.
+ */
+
+struct maple_range_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_RANGE64_SLOTS - 1];
+ union {
+ void __rcu *slot[MAPLE_RANGE64_SLOTS];
+ struct {
+ void __rcu *pad[MAPLE_RANGE64_SLOTS - 1];
+ struct maple_metadata meta;
+ };
+ };
+};
+
+/*
+ * At tree creation time, the user can specify that they're willing to trade off
+ * storing fewer entries in a tree in return for storing more information in
+ * each node.
+ *
+ * The maple tree supports recording the largest range of NULL entries available
+ * in this node, also called gaps. This optimises the tree for allocating a
+ * range.
+ */
+struct maple_arange_64 {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_ARANGE64_SLOTS - 1];
+ void __rcu *slot[MAPLE_ARANGE64_SLOTS];
+ unsigned long gap[MAPLE_ARANGE64_SLOTS];
+ struct maple_metadata meta;
+};
+
+struct maple_alloc {
+ unsigned long total;
+ unsigned char node_count;
+ unsigned int request_count;
+ struct maple_alloc *slot[MAPLE_ALLOC_SLOTS];
+};
+
+struct maple_topiary {
+ struct maple_pnode *parent;
+ struct maple_enode *next; /* Overlaps the pivot */
+};
+
+enum maple_type {
+ maple_dense,
+ maple_leaf_64,
+ maple_range_64,
+ maple_arange_64,
+};
+
+
+/**
+ * DOC: Maple tree flags
+ *
+ * * MT_FLAGS_ALLOC_RANGE - Track gaps in this tree
+ * * MT_FLAGS_USE_RCU - Operate in RCU mode
+ * * MT_FLAGS_HEIGHT_OFFSET - The position of the tree height in the flags
+ * * MT_FLAGS_HEIGHT_MASK - The mask for the maple tree height value
+ * * MT_FLAGS_LOCK_MASK - How the mt_lock is used
+ * * MT_FLAGS_LOCK_IRQ - Acquired irq-safe
+ * * MT_FLAGS_LOCK_BH - Acquired bh-safe
+ * * MT_FLAGS_LOCK_EXTERN - mt_lock is not used
+ *
+ * MAPLE_HEIGHT_MAX The largest height that can be stored
+ */
+#define MT_FLAGS_ALLOC_RANGE 0x01
+#define MT_FLAGS_USE_RCU 0x02
+#define MT_FLAGS_HEIGHT_OFFSET 0x02
+#define MT_FLAGS_HEIGHT_MASK 0x7C
+#define MT_FLAGS_LOCK_MASK 0x300
+#define MT_FLAGS_LOCK_IRQ 0x100
+#define MT_FLAGS_LOCK_BH 0x200
+#define MT_FLAGS_LOCK_EXTERN 0x300
+
+#define MAPLE_HEIGHT_MAX 31
+
+
+#define MAPLE_NODE_TYPE_MASK 0x0F
+#define MAPLE_NODE_TYPE_SHIFT 0x03
+
+#define MAPLE_RESERVED_RANGE 4096
+
+#ifdef CONFIG_LOCKDEP
+typedef struct lockdep_map *lockdep_map_p;
+#define mt_lock_is_held(mt) lock_is_held(mt->ma_external_lock)
+#define mt_set_external_lock(mt, lock) \
+ (mt)->ma_external_lock = &(lock)->dep_map
+#else
+typedef struct { /* nothing */ } lockdep_map_p;
+#define mt_lock_is_held(mt) 1
+#define mt_set_external_lock(mt, lock) do { } while (0)
+#endif
+
+/*
+ * If the tree contains a single entry at index 0, it is usually stored in
+ * tree->ma_root. To optimise for the page cache, an entry which ends in '00',
+ * '01' or '11' is stored in the root, but an entry which ends in '10' will be
+ * stored in a node. Bits 3-6 are used to store enum maple_type.
+ *
+ * The flags are used both to store some immutable information about this tree
+ * (set at tree creation time) and dynamic information set under the spinlock.
+ *
+ * Another use of the flags is to indicate global states of the tree. This is the
+ * case with the MAPLE_USE_RCU flag, which indicates the tree is currently in
+ * RCU mode. This mode was added to allow the tree to reuse nodes instead of
+ * re-allocating and RCU freeing nodes when there is a single user.
+ */
+struct maple_tree {
+ union {
+ spinlock_t ma_lock;
+ lockdep_map_p ma_external_lock;
+ };
+ void __rcu *ma_root;
+ unsigned int ma_flags;
+};
+
+/**
+ * MTREE_INIT() - Initialize a maple tree
+ * @name: The maple tree name
+ * @__flags: The maple tree flags
+ *
+ */
+#define MTREE_INIT(name, __flags) { \
+ .ma_lock = __SPIN_LOCK_UNLOCKED((name).ma_lock), \
+ .ma_flags = __flags, \
+ .ma_root = NULL, \
+}
+
+/**
+ * MTREE_INIT_EXT() - Initialize a maple tree with an external lock.
+ * @name: The tree name
+ * @__flags: The maple tree flags
+ * @__lock: The external lock
+ */
+#ifdef CONFIG_LOCKDEP
+#define MTREE_INIT_EXT(name, __flags, __lock) { \
+ .ma_external_lock = &(__lock).dep_map, \
+ .ma_flags = (__flags), \
+ .ma_root = NULL, \
+}
+#else
+#define MTREE_INIT_EXT(name, __flags, __lock) MTREE_INIT(name, __flags)
+#endif
+
+#define DEFINE_MTREE(name) \
+ struct maple_tree name = MTREE_INIT(name, 0)
+
+#define mtree_lock(mt) spin_lock((&(mt)->ma_lock))
+#define mtree_unlock(mt) spin_unlock((&(mt)->ma_lock))
+
+/*
+ * The Maple Tree squeezes various bits in at various points which aren't
+ * necessarily obvious. Usually, this is done by observing that pointers are
+ * N-byte aligned and thus the bottom log_2(N) bits are available for use. We
+ * don't use the high bits of pointers to store additional information because
+ * we don't know what bits are unused on any given architecture.
+ *
+ * Nodes are 256 bytes in size and are also aligned to 256 bytes, giving us 8
+ * low bits for our own purposes. Nodes are currently of 4 types:
+ * 1. Single pointer (Range is 0-0)
+ * 2. Non-leaf Allocation Range nodes
+ * 3. Non-leaf Range nodes
+ * 4. Leaf Range nodes
+ *
+ * All nodes consist of a number of node slots, pivots, and a parent pointer.
+ */
+
+struct maple_node {
+ union {
+ struct {
+ struct maple_pnode *parent;
+ void __rcu *slot[MAPLE_NODE_SLOTS];
+ };
+ struct {
+ void *pad;
+ struct rcu_head rcu;
+ struct maple_enode *piv_parent;
+ unsigned char parent_slot;
+ enum maple_type type;
+ unsigned char slot_len;
+ unsigned int ma_flags;
+ };
+ struct maple_range_64 mr64;
+ struct maple_arange_64 ma64;
+ struct maple_alloc alloc;
+ };
+};
+
+/*
+ * More complicated stores can cause two nodes to become one or three and
+ * potentially alter the height of the tree. Either half of the tree may need
+ * to be rebalanced against the other. The ma_topiary struct is used to track
+ * which nodes have been 'cut' from the tree so that the change can be done
+ * safely at a later date. This is done to support RCU.
+ */
+struct ma_topiary {
+ struct maple_enode *head;
+ struct maple_enode *tail;
+ struct maple_tree *mtree;
+};
+
+void *mtree_load(struct maple_tree *mt, unsigned long index);
+
+int mtree_insert(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+int mtree_insert_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp);
+
+int mtree_store_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp);
+int mtree_store(struct maple_tree *mt, unsigned long index,
+ void *entry, gfp_t gfp);
+void *mtree_erase(struct maple_tree *mt, unsigned long index);
+
+void mtree_destroy(struct maple_tree *mt);
+void __mt_destroy(struct maple_tree *mt);
+
+/**
+ * mtree_empty() - Determine if a tree has any present entries.
+ * @mt: Maple Tree.
+ *
+ * Context: Any context.
+ * Return: %true if the tree contains only NULL pointers.
+ */
+static inline bool mtree_empty(const struct maple_tree *mt)
+{
+ return mt->ma_root == NULL;
+}
+
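For readers new to the interface, here is a minimal editorial sketch of the normal API declared above (not part of this patch; example_tree, example_val and example_store_and_load are hypothetical names). The mtree_* entry points take the tree's internal spinlock themselves:

#include <linux/maple_tree.h>
#include <linux/gfp.h>

static DEFINE_MTREE(example_tree);
static int example_val = 42;

static int example_store_and_load(void)
{
	void *entry;
	int ret;

	/* Store one entry covering the inclusive index range [10, 19]. */
	ret = mtree_store_range(&example_tree, 10, 19, &example_val, GFP_KERNEL);
	if (ret)
		return ret;

	/* Any index inside the range returns the same entry. */
	entry = mtree_load(&example_tree, 15);	/* == &example_val */

	/* Erase by any index within the range; the old entry is returned. */
	entry = mtree_erase(&example_tree, 12);

	mtree_destroy(&example_tree);
	return 0;
}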
+/* Advanced API */
+
+/*
+ * The maple state is defined in the struct ma_state and is used to keep track
+ * of information during operations, and even between operations when using the
+ * advanced API.
+ *
+ * If state->node has bit 0 set then it references a tree location which is not
+ * a node (eg the root). If bit 1 is set, the rest of the bits are a negative
+ * errno. Bit 2 (the 'unallocated slots' bit) is clear. Bits 3-6 indicate the
+ * node type.
+ *
+ * state->alloc either holds a requested number of nodes or an allocated node.
+ * If state->alloc holds a requested number of nodes, the first bit will be set
+ * (0x1) and the remaining bits are the value. If state->alloc is a node, then
+ * the node will be of type maple_alloc. maple_alloc has MAPLE_NODE_SLOTS - 1
+ * slots for storing more allocated nodes, the total number of nodes allocated,
+ * and node_count, the number of allocated nodes in this node. Scaling beyond
+ * MAPLE_NODE_SLOTS - 1 is handled by storing further nodes into
+ * state->alloc->slot[0]'s node. Nodes are taken from state->alloc by removing
+ * a node from the state->alloc node until state->alloc->node_count is 1, at
+ * which point state->alloc is returned and state->alloc->slot[0] is promoted
+ * to state->alloc. Nodes are pushed onto state->alloc by putting the current
+ * state->alloc into the pushed node's slot[0].
+ *
+ * The state also contains the implied min/max of the state->node, the depth of
+ * this search, and the offset. The implied min/max are either from the parent
+ * node or are 0-oo for the root node. The depth is incremented or decremented
+ * every time a node is walked down or up. The offset is the slot/pivot of
+ * interest in the node - either for reading or writing.
+ *
+ * When returning a value the maple state index and last respectively contain
+ * the start and end of the range for the entry. Ranges are inclusive in the
+ * Maple Tree.
+ */
+struct ma_state {
+ struct maple_tree *tree; /* The tree we're operating in */
+ unsigned long index; /* The index we're operating on - range start */
+ unsigned long last; /* The last index we're operating on - range end */
+ struct maple_enode *node; /* The node containing this entry */
+ unsigned long min; /* The minimum index of this node - implied pivot min */
+ unsigned long max; /* The maximum index of this node - implied pivot max */
+ struct maple_alloc *alloc; /* Allocated nodes for this operation */
+ unsigned char depth; /* depth of tree descent during write */
+ unsigned char offset;
+ unsigned char mas_flags;
+};
+
+struct ma_wr_state {
+ struct ma_state *mas;
+ struct maple_node *node; /* Decoded mas->node */
+ unsigned long r_min; /* range min */
+ unsigned long r_max; /* range max */
+ enum maple_type type; /* mas->node type */
+ unsigned char offset_end; /* The offset where the write ends */
+ unsigned char node_end; /* mas->node end */
+ unsigned long *pivots; /* mas->node->pivots pointer */
+ unsigned long end_piv; /* The pivot at the offset end */
+ void __rcu **slots; /* mas->node->slots pointer */
+ void *entry; /* The entry to write */
+ void *content; /* The existing entry that is being overwritten */
+};
+
+#define mas_lock(mas) spin_lock(&((mas)->tree->ma_lock))
+#define mas_unlock(mas) spin_unlock(&((mas)->tree->ma_lock))
+
+
+/*
+ * Special values for ma_state.node.
+ * MAS_START means we have not searched the tree.
+ * MAS_ROOT means we have searched the tree and the entry we found lives in
+ * the root of the tree (ie it has index 0, length 1 and is the only entry in
+ * the tree).
+ * MAS_NONE means we have searched the tree and there is no node in the
+ * tree for this entry. For example, we searched for index 1 in an empty
+ * tree. Or we have a tree which points to a full leaf node and we
+ * searched for an entry which is larger than can be contained in that
+ * leaf node.
+ * MA_ERROR represents an errno. After dropping the lock and attempting
+ * to resolve the error, the walk would have to be restarted from the
+ * top of the tree as the tree may have been modified.
+ */
+#define MAS_START ((struct maple_enode *)1UL)
+#define MAS_ROOT ((struct maple_enode *)5UL)
+#define MAS_NONE ((struct maple_enode *)9UL)
+#define MAS_PAUSE ((struct maple_enode *)17UL)
+#define MA_ERROR(err) \
+ ((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
+
+#define MA_STATE(name, mt, first, end) \
+ struct ma_state name = { \
+ .tree = mt, \
+ .index = first, \
+ .last = end, \
+ .node = MAS_START, \
+ .min = 0, \
+ .max = ULONG_MAX, \
+ .alloc = NULL, \
+ }
+
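A minimal editorial sketch of a locked store through the advanced API (not part of this patch; the helper name is hypothetical, and mas_store_gfp() is assumed to return 0 on success and a negative errno on failure):

/* Hypothetical helper: store 'entry' over [index, last] in 'mt'. */
static int example_mas_store(struct maple_tree *mt, unsigned long index,
			     unsigned long last, void *entry)
{
	MA_STATE(mas, mt, index, last);
	int ret;

	mas_lock(&mas);
	ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
	mas_unlock(&mas);

	return ret;
}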
+#define MA_WR_STATE(name, ma_state, wr_entry) \
+ struct ma_wr_state name = { \
+ .mas = ma_state, \
+ .content = NULL, \
+ .entry = wr_entry, \
+ }
+
+#define MA_TOPIARY(name, tree) \
+ struct ma_topiary name = { \
+ .head = NULL, \
+ .tail = NULL, \
+ .mtree = tree, \
+ }
+
+void *mas_walk(struct ma_state *mas);
+void *mas_store(struct ma_state *mas, void *entry);
+void *mas_erase(struct ma_state *mas);
+int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp);
+void mas_store_prealloc(struct ma_state *mas, void *entry);
+void *mas_find(struct ma_state *mas, unsigned long max);
+void *mas_find_rev(struct ma_state *mas, unsigned long min);
+int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
+bool mas_is_err(struct ma_state *mas);
+
+bool mas_nomem(struct ma_state *mas, gfp_t gfp);
+void mas_pause(struct ma_state *mas);
+void maple_tree_init(void);
+void mas_destroy(struct ma_state *mas);
+int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries);
+
+void *mas_prev(struct ma_state *mas, unsigned long min);
+void *mas_next(struct ma_state *mas, unsigned long max);
+
+int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long max,
+ unsigned long size);
+
+/* Checks if a mas has not found anything */
+static inline bool mas_is_none(struct ma_state *mas)
+{
+ return mas->node == MAS_NONE;
+}
+
+/* Checks if a mas has been paused */
+static inline bool mas_is_paused(struct ma_state *mas)
+{
+ return mas->node == MAS_PAUSE;
+}
+
+void mas_dup_tree(struct ma_state *oldmas, struct ma_state *mas);
+void mas_dup_store(struct ma_state *mas, void *entry);
+
+/*
+ * This finds an empty area from the highest address to the lowest,
+ * i.e. the "topdown" version.
+ */
+int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size);
+/**
+ * mas_reset() - Reset a Maple Tree operation state.
+ * @mas: Maple Tree operation state.
+ *
+ * Resets the error or walk state of the @mas so future walks of the
+ * array will start from the root. Use this if you have dropped the
+ * lock and want to reuse the ma_state.
+ *
+ * Context: Any context.
+ */
+static inline void mas_reset(struct ma_state *mas)
+{
+ mas->node = MAS_START;
+}
+
+/**
+ * mas_for_each() - Iterate over a range of the maple tree.
+ * @__mas: Maple Tree operation state (maple_state)
+ * @__entry: Entry retrieved from the tree
+ * @__max: maximum index to retrieve from the tree
+ *
+ * When returned, mas->index and mas->last will hold the entire range for the
+ * entry.
+ *
+ * Note: may return the zero entry.
+ *
+ */
+#define mas_for_each(__mas, __entry, __max) \
+ while (((__entry) = mas_find((__mas), (__max))) != NULL)
+
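An editorial sketch of how the iteration pieces fit together (not part of this patch; the function name is hypothetical). MA_STATE() sets up the walk, mas_for_each() visits every present entry, and mas.index/mas.last report each entry's inclusive range:

#include <linux/printk.h>

/* Hypothetical helper: print every entry in 'mt'. */
static void example_dump_entries(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;

	mas_lock(&mas);
	mas_for_each(&mas, entry, ULONG_MAX) {
		pr_info("entry %p spans [%lu, %lu]\n",
			entry, mas.index, mas.last);
	}
	mas_unlock(&mas);
}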
+
+/**
+ * mas_set_range() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @start: New start of range in the Maple Tree.
+ * @last: New end of range in the Maple Tree.
+ *
+ * Move the operation state to refer to a different range. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline
+void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
+{
+ mas->index = start;
+ mas->last = last;
+ mas->node = MAS_START;
+}
+
+/**
+ * mas_set() - Set up Maple Tree operation state for a different index.
+ * @mas: Maple Tree operation state.
+ * @index: New index into the Maple Tree.
+ *
+ * Move the operation state to refer to a different index. This will
+ * have the effect of starting a walk from the top; see mas_next()
+ * to move to an adjacent index.
+ */
+static inline void mas_set(struct ma_state *mas, unsigned long index)
+{
+ mas_set_range(mas, index, index);
+}
+
+static inline bool mt_external_lock(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_LOCK_MASK) == MT_FLAGS_LOCK_EXTERN;
+}
+
+/**
+ * mt_init_flags() - Initialise an empty maple tree with flags.
+ * @mt: Maple Tree
+ * @flags: maple tree flags.
+ *
+ * If you need to initialise a Maple Tree with special flags (eg, an
+ * allocation tree), use this function.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init_flags(struct maple_tree *mt, unsigned int flags)
+{
+ mt->ma_flags = flags;
+ if (!mt_external_lock(mt))
+ spin_lock_init(&mt->ma_lock);
+ rcu_assign_pointer(mt->ma_root, NULL);
+}
+
+/**
+ * mt_init() - Initialise an empty maple tree.
+ * @mt: Maple Tree
+ *
+ * Initialise @mt as an empty Maple Tree with the default flags.
+ *
+ * Context: Any context.
+ */
+static inline void mt_init(struct maple_tree *mt)
+{
+ mt_init_flags(mt, 0);
+}
+
+static inline bool mt_in_rcu(struct maple_tree *mt)
+{
+#ifdef CONFIG_MAPLE_RCU_DISABLED
+ return false;
+#endif
+ return mt->ma_flags & MT_FLAGS_USE_RCU;
+}
+
+/**
+ * mt_clear_in_rcu() - Switch the tree to non-RCU mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_clear_in_rcu(struct maple_tree *mt)
+{
+ if (!mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ BUG_ON(!mt_lock_is_held(mt));
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags &= ~MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+/**
+ * mt_set_in_rcu() - Switch the tree to RCU safe mode.
+ * @mt: The Maple Tree
+ */
+static inline void mt_set_in_rcu(struct maple_tree *mt)
+{
+ if (mt_in_rcu(mt))
+ return;
+
+ if (mt_external_lock(mt)) {
+ BUG_ON(!mt_lock_is_held(mt));
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ } else {
+ mtree_lock(mt);
+ mt->ma_flags |= MT_FLAGS_USE_RCU;
+ mtree_unlock(mt);
+ }
+}
+
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max);
+void *mt_find_after(struct maple_tree *mt, unsigned long *index,
+ unsigned long max);
+void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min);
+void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
+
+/**
+ * mt_for_each - Iterate over each entry starting at index until max.
+ * @__tree: The Maple Tree
+ * @__entry: The current entry
+ * @__index: The index to update to track the location in the tree
+ * @__max: The maximum limit for @index
+ *
+ * Note: Will not return the zero entry.
+ */
+#define mt_for_each(__tree, __entry, __index, __max) \
+ for (__entry = mt_find(__tree, &(__index), __max); \
+ __entry; __entry = mt_find_after(__tree, &(__index), __max))
+
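For the normal API, the equivalent loop keeps its position in a caller-supplied index rather than a maple state. An editorial sketch with a hypothetical helper name:

/* Hypothetical helper: count present entries with index in [min, max]. */
static unsigned long example_count_entries(struct maple_tree *mt,
					   unsigned long min, unsigned long max)
{
	unsigned long index = min;
	unsigned long count = 0;
	void *entry;

	mt_for_each(mt, entry, index, max)
		count++;

	return count;
}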
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+extern atomic_t maple_tree_tests_run;
+extern atomic_t maple_tree_tests_passed;
+
+void mt_dump(const struct maple_tree *mt);
+void mt_validate(struct maple_tree *mt);
+#define MT_BUG_ON(__tree, __x) do { \
+ atomic_inc(&maple_tree_tests_run); \
+ if (__x) { \
+ pr_info("BUG at %s:%d (%u)\n", \
+ __func__, __LINE__, __x); \
+ mt_dump(__tree); \
+ pr_info("Pass: %u Run:%u\n", \
+ atomic_read(&maple_tree_tests_passed), \
+ atomic_read(&maple_tree_tests_run)); \
+ dump_stack(); \
+ } else { \
+ atomic_inc(&maple_tree_tests_passed); \
+ } \
+} while (0)
+#else
+#define MT_BUG_ON(__tree, __x) BUG_ON(__x)
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
+
+#endif /*_LINUX_MAPLE_TREE_H */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 567f12323f55..e1644a24009c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -80,29 +80,8 @@ enum mem_cgroup_events_target {
MEM_CGROUP_NTARGETS,
};
-struct memcg_vmstats_percpu {
- /* Local (CPU and cgroup) page state & events */
- long state[MEMCG_NR_STAT];
- unsigned long events[NR_VM_EVENT_ITEMS];
-
- /* Delta calculation for lockless upward propagation */
- long state_prev[MEMCG_NR_STAT];
- unsigned long events_prev[NR_VM_EVENT_ITEMS];
-
- /* Cgroup1: threshold notifications & softlimit tree updates */
- unsigned long nr_page_events;
- unsigned long targets[MEM_CGROUP_NTARGETS];
-};
-
-struct memcg_vmstats {
- /* Aggregated (CPU and subtree) page state & events */
- long state[MEMCG_NR_STAT];
- unsigned long events[NR_VM_EVENT_ITEMS];
-
- /* Pending child counts during tree propagation */
- long state_pending[MEMCG_NR_STAT];
- unsigned long events_pending[NR_VM_EVENT_ITEMS];
-};
+struct memcg_vmstats_percpu;
+struct memcg_vmstats;
struct mem_cgroup_reclaim_iter {
struct mem_cgroup *position;
@@ -185,15 +164,6 @@ struct mem_cgroup_thresholds {
struct mem_cgroup_threshold_ary *spare;
};
-#if defined(CONFIG_SMP)
-struct memcg_padding {
- char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define MEMCG_PADDING(name) struct memcg_padding name
-#else
-#define MEMCG_PADDING(name)
-#endif
-
/*
* Remember four most recent foreign writebacks with dirty pages in this
* cgroup. Inode sharing is expected to be uncommon and, even if we miss
@@ -304,10 +274,10 @@ struct mem_cgroup {
spinlock_t move_lock;
unsigned long move_lock_flags;
- MEMCG_PADDING(_pad1_);
+ CACHELINE_PADDING(_pad1_);
/* memory.stat */
- struct memcg_vmstats vmstats;
+ struct memcg_vmstats *vmstats;
/* memory.events */
atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
@@ -326,7 +296,7 @@ struct mem_cgroup {
struct list_head objcg_list;
#endif
- MEMCG_PADDING(_pad2_);
+ CACHELINE_PADDING(_pad2_);
/*
* set > 0 if pages under this cgroup are moving to other cgroup.
@@ -350,14 +320,20 @@ struct mem_cgroup {
struct deferred_split deferred_split_queue;
#endif
+#ifdef CONFIG_LRU_GEN
+ /* per-memcg mm_struct list */
+ struct lru_gen_mm_list mm_list;
+#endif
+
struct mem_cgroup_per_node *nodeinfo[];
};
/*
- * size of first charge trial. "32" comes from vmscan.c's magic value.
- * TODO: maybe necessary to use big numbers in big irons.
+ * size of first charge trial.
+ * TODO: it may be necessary to use larger numbers on big systems, or to size
+ * this dynamically based on the workload.
*/
-#define MEMCG_CHARGE_BATCH 32U
+#define MEMCG_CHARGE_BATCH 64U
extern struct mem_cgroup *root_mem_cgroup;
@@ -444,6 +420,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
* - LRU isolation
* - lock_page_memcg()
* - exclusive reference
+ * - mem_cgroup_trylock_pages()
*
* For a kmem folio a caller should hold an rcu read lock to protect memcg
* associated with a kmem folio from being released.
@@ -505,6 +482,7 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
* - LRU isolation
* - lock_page_memcg()
* - exclusive reference
+ * - mem_cgroup_trylock_pages()
*
* For a kmem page a caller should hold an rcu read lock to protect memcg
* associated with a kmem page from being released.
@@ -689,7 +667,7 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
return __mem_cgroup_charge(folio, mm, gfp);
}
-int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
@@ -959,6 +937,23 @@ void unlock_page_memcg(struct page *page);
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
+/* try to stabilize folio_memcg() for all the pages in a memcg */
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+ rcu_read_lock();
+
+ if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account))
+ return true;
+
+ rcu_read_unlock();
+ return false;
+}
+
+static inline void mem_cgroup_unlock_pages(void)
+{
+ rcu_read_unlock();
+}
+
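The intended calling pattern mirrors a conditional lock: a successful mem_cgroup_trylock_pages() leaves the RCU read lock held and must be paired with mem_cgroup_unlock_pages(). An editorial sketch with a hypothetical caller:

/* Hypothetical caller: only proceed while folio_memcg() is stable. */
static bool example_walk_memcg_pages(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_trylock_pages(memcg))
		return false;	/* pages may be moving between cgroups */

	/* ... folio_memcg() is stable for folios charged to @memcg ... */

	mem_cgroup_unlock_pages();
	return true;
}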
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
int idx, int val)
@@ -985,15 +980,7 @@ static inline void mod_memcg_page_state(struct page *page,
rcu_read_unlock();
}
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
-{
- long x = READ_ONCE(memcg->vmstats.state[idx]);
-#ifdef CONFIG_SMP
- if (x < 0)
- x = 0;
-#endif
- return x;
-}
+unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx);
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
@@ -1238,7 +1225,7 @@ static inline int mem_cgroup_charge(struct folio *folio,
return 0;
}
-static inline int mem_cgroup_swapin_charge_page(struct page *page,
+static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
return 0;
@@ -1433,6 +1420,18 @@ static inline void folio_memcg_unlock(struct folio *folio)
{
}
+static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg)
+{
+ /* to match folio_memcg_rcu() */
+ rcu_read_lock();
+ return true;
+}
+
+static inline void mem_cgroup_unlock_pages(void)
+{
+ rcu_read_unlock();
+}
+
static inline void mem_cgroup_handle_over_high(void)
{
}
@@ -1779,7 +1778,7 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
{
struct mem_cgroup *memcg;
- if (mem_cgroup_kmem_disabled())
+ if (!memcg_kmem_enabled())
return;
rcu_read_lock();
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
new file mode 100644
index 000000000000..965009aa01d7
--- /dev/null
+++ b/include/linux/memory-tiers.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MEMORY_TIERS_H
+#define _LINUX_MEMORY_TIERS_H
+
+#include <linux/types.h>
+#include <linux/nodemask.h>
+#include <linux/kref.h>
+#include <linux/mmzone.h>
+/*
+ * Each tier covers an abstract distance chunk of size 128.
+ */
+#define MEMTIER_CHUNK_BITS 7
+#define MEMTIER_CHUNK_SIZE (1 << MEMTIER_CHUNK_BITS)
+/*
+ * Smaller abstract distance values imply faster (higher) memory tiers. Offset
+ * the DRAM adistance so that devices with a slightly lower adistance value
+ * (slightly faster) than the default DRAM adistance can still be placed in
+ * the same memory tier.
+ */
+#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
+#define MEMTIER_HOTPLUG_PRIO 100
+
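To make the chunk arithmetic concrete, here is an editorial sketch (the helper name is hypothetical; the actual grouping of types into tiers is done in mm/memory-tiers.c):

/* Hypothetical: which 128-wide abstract distance chunk a value falls into. */
static inline int example_adistance_to_chunk(int adistance)
{
	return adistance >> MEMTIER_CHUNK_BITS;
}

/*
 * MEMTIER_ADISTANCE_DRAM = 4 * 128 + 64 = 576, so it falls into chunk 4;
 * any adistance in [512, 639] would land in the same tier as default DRAM.
 */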
+struct memory_tier;
+struct memory_dev_type {
+	/* list of memory types that are part of the same tier as this type */
+ struct list_head tier_sibiling;
+ /* abstract distance for this specific memory type */
+ int adistance;
+ /* Nodes of same abstract distance */
+ nodemask_t nodes;
+ struct kref kref;
+};
+
+#ifdef CONFIG_NUMA
+extern bool numa_demotion_enabled;
+struct memory_dev_type *alloc_memory_type(int adistance);
+void destroy_memory_type(struct memory_dev_type *memtype);
+void init_node_memory_type(int node, struct memory_dev_type *default_type);
+void clear_node_memory_type(int node, struct memory_dev_type *memtype);
+#ifdef CONFIG_MIGRATION
+int next_demotion_node(int node);
+void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
+bool node_is_toptier(int node);
+#else
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+#endif
+
+#else
+
+#define numa_demotion_enabled false
+/*
+ * The CONFIG_NUMA implementation returns a non-NULL error pointer on failure;
+ * this !CONFIG_NUMA stub simply returns NULL.
+ */
+static inline struct memory_dev_type *alloc_memory_type(int adistance)
+{
+ return NULL;
+}
+
+static inline void destroy_memory_type(struct memory_dev_type *memtype)
+{
+
+}
+
+static inline void init_node_memory_type(int node, struct memory_dev_type *default_type)
+{
+
+}
+
+static inline void clear_node_memory_type(int node, struct memory_dev_type *memtype)
+{
+
+}
+
+static inline int next_demotion_node(int node)
+{
+ return NUMA_NO_NODE;
+}
+
+static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ *targets = NODE_MASK_NONE;
+}
+
+static inline bool node_is_toptier(int node)
+{
+ return true;
+}
+#endif /* CONFIG_NUMA */
+#endif /* _LINUX_MEMORY_TIERS_H */
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index e0b2209ab71c..9fcbf5706595 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -11,7 +11,6 @@ struct page;
struct zone;
struct pglist_data;
struct mem_section;
-struct memory_block;
struct memory_group;
struct resource;
struct vmem_altmap;
@@ -44,11 +43,6 @@ extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
({ \
memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
})
-/*
- * This definition is just for error path in node hotadd.
- * For node hotremove, we have to replace this.
- */
-#define generic_free_nodedata(pgdat) kfree(pgdat)
extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
@@ -64,9 +58,6 @@ static inline pg_data_t *generic_alloc_nodedata(int nid)
BUG();
return NULL;
}
-static inline void generic_free_nodedata(pg_data_t *pgdat)
-{
-}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
@@ -216,6 +207,22 @@ void put_online_mems(void);
void mem_hotplug_begin(void);
void mem_hotplug_done(void);
+/* See kswapd_is_running() */
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat)
+{
+ mutex_lock(&pgdat->kswapd_lock);
+}
+
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)
+{
+ mutex_unlock(&pgdat->kswapd_lock);
+}
+
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)
+{
+ mutex_init(&pgdat->kswapd_lock);
+}
+
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
({ \
@@ -252,6 +259,10 @@ static inline bool movable_node_is_enabled(void)
{
return false;
}
+
+static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}
+static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}
#endif /* ! CONFIG_MEMORY_HOTPLUG */
/*
@@ -333,7 +344,6 @@ extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
extern void remove_pfn_range_from_zone(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages);
-extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
unsigned long nr_pages, struct vmem_altmap *altmap,
struct dev_pagemap *pgmap);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 668389b4b53d..d232de7cdc56 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -151,13 +151,6 @@ extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
const nodemask_t *mask);
extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
-static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
-{
- struct mempolicy *mpol = get_task_policy(current);
-
- return policy_nodemask(gfp, mpol);
-}
-
extern unsigned int mempolicy_slab_node(void);
extern enum zone_type policy_zone;
@@ -189,6 +182,7 @@ static inline bool mpol_is_preferred_many(struct mempolicy *pol)
return (pol->mode == MPOL_PREFERRED_MANY);
}
+extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
#else
@@ -294,11 +288,6 @@ static inline void mpol_put_task_policy(struct task_struct *task)
{
}
-static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
-{
- return NULL;
-}
-
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
return false;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 22c0a0cf5e0c..704a04f5a074 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -100,21 +100,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
-#if defined(CONFIG_MIGRATION) && defined(CONFIG_NUMA)
-extern void set_migration_target_nodes(void);
-extern void migrate_on_reclaim_init(void);
-extern bool numa_demotion_enabled;
-extern int next_demotion_node(int node);
-#else
-static inline void set_migration_target_nodes(void) {}
-static inline void migrate_on_reclaim_init(void) {}
-static inline int next_demotion_node(int node)
-{
- return NUMA_NO_NODE;
-}
-#define numa_demotion_enabled false
-#endif
-
#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page);
void __SetPageMovable(struct page *page, const struct movable_operations *ops);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 21f8b27bd9fd..8bbcccbc5565 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -661,6 +661,38 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
return vma->vm_flags & VM_ACCESS_FLAGS;
}
+static inline
+struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
+{
+ return mas_find(&vmi->mas, max);
+}
+
+static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
+{
+ /*
+ * Uses vma_find() to get the first VMA when the iterator starts.
+ * Calling mas_next() could skip the first entry.
+ */
+ return vma_find(vmi, ULONG_MAX);
+}
+
+static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
+{
+ return mas_prev(&vmi->mas, 0);
+}
+
+static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
+{
+ return vmi->mas.index;
+}
+
+#define for_each_vma(__vmi, __vma) \
+ while (((__vma) = vma_next(&(__vmi))) != NULL)
+
+/* The MM code likes to work with exclusive end addresses */
+#define for_each_vma_range(__vmi, __vma, __end) \
+ while (((__vma) = vma_find(&(__vmi), (__end) - 1)) != NULL)
+
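An editorial sketch of the iterators in use (not part of this patch; the helper name is hypothetical, VMA_ITERATOR() is defined later in this patch in mm_types.h, and the caller is assumed to hold mmap_lock):

/* Hypothetical helper: count the VMAs in an address space. */
static unsigned long example_count_vmas(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;
	unsigned long count = 0;

	for_each_vma(vmi, vma)
		count++;

	return count;
}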
#ifdef CONFIG_SHMEM
/*
* The vma_is_shmem is not inline because it is used only by slow
@@ -697,7 +729,9 @@ static inline unsigned int compound_order(struct page *page)
*/
static inline unsigned int folio_order(struct folio *folio)
{
- return compound_order(&folio->page);
+ if (!folio_test_large(folio))
+ return 0;
+ return folio->_folio_order;
}
#include <linux/huge_mm.h>
@@ -1255,6 +1289,18 @@ static inline int folio_nid(const struct folio *folio)
}
#ifdef CONFIG_NUMA_BALANCING
+/* page access time bits need to hold at least 4 seconds */
+#define PAGE_ACCESS_TIME_MIN_BITS 12
+#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
+#define PAGE_ACCESS_TIME_BUCKETS \
+ (PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
+#else
+#define PAGE_ACCESS_TIME_BUCKETS 0
+#endif
+
+#define PAGE_ACCESS_TIME_MASK \
+ (LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
+
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
@@ -1318,12 +1364,25 @@ static inline void page_cpupid_reset_last(struct page *page)
page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
+
+static inline int xchg_page_access_time(struct page *page, int time)
+{
+ int last_time;
+
+ last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS);
+ return last_time << PAGE_ACCESS_TIME_BUCKETS;
+}
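An editorial illustration of the bucket scaling (assumptions: the access time is recorded in milliseconds, consistent with the 4-second requirement above, and LAST_CPUPID_SHIFT is 8; both depend on the configuration):

/*
 * Illustration only: with LAST_CPUPID_SHIFT == 8, PAGE_ACCESS_TIME_BUCKETS
 * is 12 - 8 == 4, so xchg_page_access_time() stores time >> 4 in the 8
 * available bits and reconstructs it as (stored << 4). The 8 stored bits
 * then cover 256 * 16 == 4096 time units, i.e. about 4 seconds when the
 * time is in milliseconds, at a granularity of 16 units.
 */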
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
return page_to_nid(page); /* XXX */
}
+static inline int xchg_page_access_time(struct page *page, int time)
+{
+ return 0;
+}
+
static inline int page_cpupid_last(struct page *page)
{
return page_to_nid(page); /* XXX */
@@ -1465,6 +1524,11 @@ static inline unsigned long folio_pfn(struct folio *folio)
return page_to_pfn(&folio->page);
}
+static inline struct folio *pfn_folio(unsigned long pfn)
+{
+ return page_folio(pfn_to_page(pfn));
+}
+
static inline atomic_t *folio_pincount_ptr(struct folio *folio)
{
return &folio_page(folio, 1)->compound_pincount;
@@ -1597,7 +1661,13 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
*/
static inline long folio_nr_pages(struct folio *folio)
{
- return compound_nr(&folio->page);
+ if (!folio_test_large(folio))
+ return 1;
+#ifdef CONFIG_64BIT
+ return folio->_folio_nr_pages;
+#else
+ return 1L << folio->_folio_order;
+#endif
}
/**
@@ -1776,7 +1846,11 @@ extern void pagefault_out_of_memory(void);
*/
#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
-extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
+extern void __show_free_areas(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
+static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodemask)
+{
+ __show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
+}
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
@@ -1795,8 +1869,9 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
-void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
- unsigned long start, unsigned long end);
+void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
+ struct vm_area_struct *start_vma, unsigned long start,
+ unsigned long end);
struct mmu_notifier_range;
@@ -2495,7 +2570,6 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
extern void get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn);
-extern unsigned long find_min_pfn_with_active_regions(void);
#ifndef CONFIG_NUMA
static inline int early_pfn_to_nid(unsigned long pfn)
@@ -2516,7 +2590,12 @@ extern void calculate_min_free_kbytes(void);
extern int __meminit init_per_zone_wmark_min(void);
extern void mem_init(void);
extern void __init mmap_init(void);
-extern void show_mem(unsigned int flags, nodemask_t *nodemask);
+
+extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
+static inline void show_mem(unsigned int flags, nodemask_t *nodemask)
+{
+ __show_mem(flags, nodemask, MAX_NR_ZONES - 1);
+}
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
@@ -2593,14 +2672,15 @@ extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
extern int split_vma(struct mm_struct *, struct vm_area_struct *,
unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
-extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
- struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, pgoff_t pgoff,
bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
+void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas);
+void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas);
+
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
unsigned long start,
@@ -2648,8 +2728,9 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
unsigned long pgoff, unsigned long *populate, struct list_head *uf);
-extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
- struct list_head *uf, bool downgrade);
+extern int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf,
+ bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
@@ -2716,26 +2797,12 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
-/**
- * find_vma_intersection() - Look up the first VMA which intersects the interval
- * @mm: The process address space.
- * @start_addr: The inclusive start user address.
- * @end_addr: The exclusive end user address.
- *
- * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
- * start_addr < end_addr.
+/*
+ * Look up the first VMA which intersects the interval [start_addr, end_addr)
+ * NULL if none. Assume start_addr < end_addr.
*/
-static inline
struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
- unsigned long start_addr,
- unsigned long end_addr)
-{
- struct vm_area_struct *vma = find_vma(mm, start_addr);
-
- if (vma && end_addr <= vma->vm_start)
- vma = NULL;
- return vma;
-}
+ unsigned long start_addr, unsigned long end_addr);
/**
* vma_lookup() - Find a VMA at a specific address
@@ -2747,12 +2814,7 @@ struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
static inline
struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
{
- struct vm_area_struct *vma = find_vma(mm, addr);
-
- if (vma && addr < vma->vm_start)
- vma = NULL;
-
- return vma;
+ return mtree_load(&mm->mm_mt, addr);
}
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
@@ -2788,7 +2850,7 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
unsigned long vm_start, unsigned long vm_end)
{
- struct vm_area_struct *vma = find_vma(mm, vm_start);
+ struct vm_area_struct *vma = vma_lookup(mm, vm_start);
if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
vma = NULL;
@@ -2888,7 +2950,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
* and return without waiting upon it */
#define FOLL_NOFAULT 0x80 /* do not fault in pages */
#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
-#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
@@ -2975,8 +3036,8 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
* PageAnonExclusive() has to protect against concurrent GUP:
* * Ordinary GUP: Using the PT lock
* * GUP-fast and fork(): mm->write_protect_seq
- * * GUP-fast and KSM or temporary unmapping (swap, migration):
- * clear/invalidate+flush of the page table entry
+ * * GUP-fast and KSM or temporary unmapping (swap, migration): see
+ * page_try_share_anon_rmap()
*
* Must be called with the (sub)page that's actually referenced via the
* page table entry, which might not necessarily be the head page for a
@@ -2997,6 +3058,11 @@ static inline bool gup_must_unshare(unsigned int flags, struct page *page)
*/
if (!PageAnon(page))
return false;
+
+ /* Paired with a memory barrier in page_try_share_anon_rmap(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_rmb();
+
/*
* Note that PageKsm() pages cannot be exclusive, and consequently,
* cannot get pinned.
@@ -3004,6 +3070,21 @@ static inline bool gup_must_unshare(unsigned int flags, struct page *page)
return !PageAnonExclusive(page);
}
+/*
+ * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
+ * a (NUMA hinting) fault is required.
+ */
+static inline bool gup_can_follow_protnone(unsigned int flags)
+{
+ /*
+ * FOLL_FORCE has to be able to make progress even if the VMA is
+ * inaccessible. Further, FOLL_FORCE access usually does not represent
+ * application behaviour and we should avoid triggering NUMA hinting
+ * faults.
+ */
+ return flags & FOLL_FORCE;
+}
+
typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
@@ -3011,7 +3092,7 @@ extern int apply_to_existing_page_range(struct mm_struct *mm,
unsigned long address, unsigned long size,
pte_fn_t fn, void *data);
-extern void init_mem_debugging_and_hardening(void);
+extern void __init init_mem_debugging_and_hardening(void);
#ifdef CONFIG_PAGE_POISONING
extern void __kernel_poison_pages(struct page *page, int numpages);
extern void __kernel_unpoison_pages(struct page *page, int numpages);
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 7b25b53c474a..e8ed225d8f7c 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -34,15 +34,25 @@ static inline int page_is_file_lru(struct page *page)
return folio_is_file_lru(page_folio(page));
}
-static __always_inline void update_lru_size(struct lruvec *lruvec,
+static __always_inline void __update_lru_size(struct lruvec *lruvec,
enum lru_list lru, enum zone_type zid,
long nr_pages)
{
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+ lockdep_assert_held(&lruvec->lru_lock);
+ WARN_ON_ONCE(nr_pages != (int)nr_pages);
+
__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
__mod_zone_page_state(&pgdat->node_zones[zid],
NR_ZONE_LRU_BASE + lru, nr_pages);
+}
+
+static __always_inline void update_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, enum zone_type zid,
+ long nr_pages)
+{
+ __update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
@@ -66,11 +76,6 @@ static __always_inline void __folio_clear_lru_flags(struct folio *folio)
__folio_clear_unevictable(folio);
}
-static __always_inline void __clear_page_lru_flags(struct page *page)
-{
- __folio_clear_lru_flags(page_folio(page));
-}
-
/**
* folio_lru_list - Which LRU list should a folio be on?
* @folio: The folio to test.
@@ -94,11 +99,224 @@ static __always_inline enum lru_list folio_lru_list(struct folio *folio)
return lru;
}
+#ifdef CONFIG_LRU_GEN
+
+#ifdef CONFIG_LRU_GEN_ENABLED
+static inline bool lru_gen_enabled(void)
+{
+ DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);
+
+ return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
+}
+#else
+static inline bool lru_gen_enabled(void)
+{
+ DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);
+
+ return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
+}
+#endif
+
+static inline bool lru_gen_in_fault(void)
+{
+ return current->in_lru_fault;
+}
+
+static inline int lru_gen_from_seq(unsigned long seq)
+{
+ return seq % MAX_NR_GENS;
+}
+
+static inline int lru_hist_from_seq(unsigned long seq)
+{
+ return seq % NR_HIST_GENS;
+}
+
+static inline int lru_tier_from_refs(int refs)
+{
+ VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));
+
+ /* see the comment in folio_lru_refs() */
+ return order_base_2(refs + 1);
+}
+
+static inline int folio_lru_refs(struct folio *folio)
+{
+ unsigned long flags = READ_ONCE(folio->flags);
+ bool workingset = flags & BIT(PG_workingset);
+
+ /*
+ * Return the number of accesses beyond PG_referenced, i.e., N-1 if the
+ * total number of accesses is N>1, since N=0,1 both map to the first
+ * tier. lru_tier_from_refs() will account for this off-by-one. Also see
+ * the comment on MAX_NR_TIERS.
+ */
+ return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
+}
+
+static inline int folio_lru_gen(struct folio *folio)
+{
+ unsigned long flags = READ_ONCE(folio->flags);
+
+ return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+}
+
+static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
+{
+ unsigned long max_seq = lruvec->lrugen.max_seq;
+
+ VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
+
+ /* see the comment on MIN_NR_GENS */
+ return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
+}
+
+static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
+ int old_gen, int new_gen)
+{
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ int delta = folio_nr_pages(folio);
+ enum lru_list lru = type * LRU_INACTIVE_FILE;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);
+
+ if (old_gen >= 0)
+ WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
+ lrugen->nr_pages[old_gen][type][zone] - delta);
+ if (new_gen >= 0)
+ WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
+ lrugen->nr_pages[new_gen][type][zone] + delta);
+
+ /* addition */
+ if (old_gen < 0) {
+ if (lru_gen_is_active(lruvec, new_gen))
+ lru += LRU_ACTIVE;
+ __update_lru_size(lruvec, lru, zone, delta);
+ return;
+ }
+
+ /* deletion */
+ if (new_gen < 0) {
+ if (lru_gen_is_active(lruvec, old_gen))
+ lru += LRU_ACTIVE;
+ __update_lru_size(lruvec, lru, zone, -delta);
+ return;
+ }
+
+ /* promotion */
+ if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
+ __update_lru_size(lruvec, lru, zone, -delta);
+ __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
+ }
+
+ /* demotion requires isolation, e.g., lru_deactivate_fn() */
+ VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
+}
+
+static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long seq;
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);
+
+ if (folio_test_unevictable(folio) || !lrugen->enabled)
+ return false;
+ /*
+ * There are three common cases for this page:
+ * 1. If it's hot, e.g., freshly faulted in or previously hot and
+ * migrated, add it to the youngest generation.
+ * 2. If it's cold but can't be evicted immediately, i.e., an anon page
+ * not in swapcache or a dirty page pending writeback, add it to the
+ * second oldest generation.
+ * 3. Everything else (clean, cold) is added to the oldest generation.
+ */
+ if (folio_test_active(folio))
+ seq = lrugen->max_seq;
+ else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
+ (folio_test_reclaim(folio) &&
+ (folio_test_dirty(folio) || folio_test_writeback(folio))))
+ seq = lrugen->min_seq[type] + 1;
+ else
+ seq = lrugen->min_seq[type];
+
+ gen = lru_gen_from_seq(seq);
+ flags = (gen + 1UL) << LRU_GEN_PGOFF;
+ /* see the comment on MIN_NR_GENS about PG_active */
+ set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);
+
+ lru_gen_update_size(lruvec, folio, -1, gen);
+ /* for folio_rotate_reclaimable() */
+ if (reclaiming)
+ list_add_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+ else
+ list_add(&folio->lru, &lrugen->lists[gen][type][zone]);
+
+ return true;
+}
+
+static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ unsigned long flags;
+ int gen = folio_lru_gen(folio);
+
+ if (gen < 0)
+ return false;
+
+ VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
+
+ /* for folio_migrate_flags() */
+ flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
+ flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
+ gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+
+ lru_gen_update_size(lruvec, folio, gen, -1);
+ list_del(&folio->lru);
+
+ return true;
+}
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline bool lru_gen_enabled(void)
+{
+ return false;
+}
+
+static inline bool lru_gen_in_fault(void)
+{
+ return false;
+}
+
+static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ return false;
+}
+
+static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ return false;
+}
+
+#endif /* CONFIG_LRU_GEN */
+
static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
enum lru_list lru = folio_lru_list(folio);
+ if (lru_gen_add_folio(lruvec, folio, false))
+ return;
+
update_lru_size(lruvec, lru, folio_zonenum(folio),
folio_nr_pages(folio));
if (lru != LRU_UNEVICTABLE)
@@ -116,23 +334,23 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
enum lru_list lru = folio_lru_list(folio);
+ if (lru_gen_add_folio(lruvec, folio, true))
+ return;
+
update_lru_size(lruvec, lru, folio_zonenum(folio),
folio_nr_pages(folio));
/* This is not expected to be used on LRU_UNEVICTABLE */
list_add_tail(&folio->lru, &lruvec->lists[lru]);
}
-static __always_inline void add_page_to_lru_list_tail(struct page *page,
- struct lruvec *lruvec)
-{
- lruvec_add_folio_tail(lruvec, page_folio(page));
-}
-
static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
enum lru_list lru = folio_lru_list(folio);
+ if (lru_gen_del_folio(lruvec, folio, false))
+ return;
+
if (lru != LRU_UNEVICTABLE)
list_del(&folio->lru);
update_lru_size(lruvec, lru, folio_zonenum(folio),
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cf97f3884fda..500e536796ca 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -9,6 +9,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
+#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
@@ -223,6 +224,18 @@ struct page {
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
+#ifdef CONFIG_KMSAN
+ /*
+ * KMSAN metadata for this page:
+ * - shadow page: every bit indicates whether the corresponding
+ * bit of the original page is initialized (0) or not (1);
+ * - origin page: every 4 bytes contain an id of the stack trace
+ * where the uninitialized value was created.
+ */
+ struct page *kmsan_shadow;
+ struct page *kmsan_origin;
+#endif
+
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
@@ -244,6 +257,13 @@ struct page {
* @_refcount: Do not access this member directly. Use folio_ref_count()
* to find how many references there are to this folio.
* @memcg_data: Memory Control Group data.
+ * @_flags_1: For large folios, additional page flags.
+ * @__head: Points to the folio. Do not use.
+ * @_folio_dtor: Which destructor to use for this folio.
+ * @_folio_order: Do not use directly, call folio_order().
+ * @_total_mapcount: Do not use directly, call folio_entire_mapcount().
+ * @_pincount: Do not use directly, call folio_maybe_dma_pinned().
+ * @_folio_nr_pages: Do not use directly, call folio_nr_pages().
*
* A folio is a physically, virtually and logically contiguous set
* of bytes. It is a power-of-two in size, and it is aligned to that
@@ -282,9 +302,17 @@ struct folio {
};
struct page page;
};
+ unsigned long _flags_1;
+ unsigned long __head;
+ unsigned char _folio_dtor;
+ unsigned char _folio_order;
+ atomic_t _total_mapcount;
+ atomic_t _pincount;
+#ifdef CONFIG_64BIT
+ unsigned int _folio_nr_pages;
+#endif
};
-static_assert(sizeof(struct page) == sizeof(struct folio));
#define FOLIO_MATCH(pg, fl) \
static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
FOLIO_MATCH(flags, flags);
@@ -299,6 +327,19 @@ FOLIO_MATCH(_refcount, _refcount);
FOLIO_MATCH(memcg_data, memcg_data);
#endif
#undef FOLIO_MATCH
+#define FOLIO_MATCH(pg, fl) \
+ static_assert(offsetof(struct folio, fl) == \
+ offsetof(struct page, pg) + sizeof(struct page))
+FOLIO_MATCH(flags, _flags_1);
+FOLIO_MATCH(compound_head, __head);
+FOLIO_MATCH(compound_dtor, _folio_dtor);
+FOLIO_MATCH(compound_order, _folio_order);
+FOLIO_MATCH(compound_mapcount, _total_mapcount);
+FOLIO_MATCH(compound_pincount, _pincount);
+#ifdef CONFIG_64BIT
+FOLIO_MATCH(compound_nr, _folio_nr_pages);
+#endif
+#undef FOLIO_MATCH
static inline atomic_t *folio_mapcount_ptr(struct folio *folio)
{
@@ -407,21 +448,6 @@ struct vm_area_struct {
unsigned long vm_end; /* The first byte after our end address
within vm_mm. */
- /* linked list of VM areas per task, sorted by address */
- struct vm_area_struct *vm_next, *vm_prev;
-
- struct rb_node vm_rb;
-
- /*
- * Largest free memory gap in bytes to the left of this VMA.
- * Either between this VMA and vma->vm_prev, or between one of the
- * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
- * get_unmapped_area find a free area of the right size.
- */
- unsigned long rb_subtree_gap;
-
- /* Second cache line starts here. */
-
struct mm_struct *vm_mm; /* The address space we belong to. */
/*
@@ -485,9 +511,7 @@ struct vm_area_struct {
struct kioctx_table;
struct mm_struct {
struct {
- struct vm_area_struct *mmap; /* list of VMAs */
- struct rb_root mm_rb;
- u64 vmacache_seqnum; /* per-thread vmacache */
+ struct maple_tree mm_mt;
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
@@ -501,7 +525,6 @@ struct mm_struct {
unsigned long mmap_compat_legacy_base;
#endif
unsigned long task_size; /* size of task vm space */
- unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
#ifdef CONFIG_MEMBARRIER
@@ -631,22 +654,22 @@ struct mm_struct {
#endif
#ifdef CONFIG_NUMA_BALANCING
/*
- * numa_next_scan is the next time that the PTEs will be marked
- * pte_numa. NUMA hinting faults will gather statistics and
- * migrate pages to new nodes if necessary.
+ * numa_next_scan is the next time that PTEs will be remapped
+ * PROT_NONE to trigger NUMA hinting faults; such faults gather
+ * statistics and migrate pages to new nodes if necessary.
*/
unsigned long numa_next_scan;
- /* Restart point for scanning and setting pte_numa */
+ /* Restart point for scanning and remapping PTEs. */
unsigned long numa_scan_offset;
- /* numa_scan_seq prevents two threads setting pte_numa */
+ /* numa_scan_seq prevents two threads remapping PTEs. */
int numa_scan_seq;
#endif
/*
* An operation with batched TLB flushing is going on. Anything
* that can move process memory needs to flush the TLB when
- * moving a PROT_NONE or PROT_NUMA mapped page.
+ * moving a PROT_NONE mapped page.
*/
atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
@@ -671,7 +694,28 @@ struct mm_struct {
* merging.
*/
unsigned long ksm_merging_pages;
+ /*
+ * Represent how many pages are checked for ksm merging
+ * including merged and not merged.
+ */
+ unsigned long ksm_rmap_items;
+#endif
+#ifdef CONFIG_LRU_GEN
+ struct {
+ /* this mm_struct is on lru_gen_mm_list */
+ struct list_head list;
+ /*
+ * Set when switching to this mm_struct, as a hint of
+ * whether it has been used since the last time per-node
+ * page table walkers cleared the corresponding bits.
+ */
+ unsigned long bitmap;
+#ifdef CONFIG_MEMCG
+ /* points to the memcg of "owner" above */
+ struct mem_cgroup *memcg;
#endif
+ } lru_gen;
+#endif /* CONFIG_LRU_GEN */
} __randomize_layout;
/*
@@ -681,6 +725,7 @@ struct mm_struct {
unsigned long cpu_bitmap[];
};
+#define MM_MT_FLAGS (MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN)
extern struct mm_struct init_mm;
/* Pointer magic because the dynamic array size confuses some compilers. */
@@ -698,6 +743,87 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return (struct cpumask *)&mm->cpu_bitmap;
}
+#ifdef CONFIG_LRU_GEN
+
+struct lru_gen_mm_list {
+ /* mm_struct list for page table walkers */
+ struct list_head fifo;
+ /* protects the list above */
+ spinlock_t lock;
+};
+
+void lru_gen_add_mm(struct mm_struct *mm);
+void lru_gen_del_mm(struct mm_struct *mm);
+#ifdef CONFIG_MEMCG
+void lru_gen_migrate_mm(struct mm_struct *mm);
+#endif
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
+{
+ INIT_LIST_HEAD(&mm->lru_gen.list);
+ mm->lru_gen.bitmap = 0;
+#ifdef CONFIG_MEMCG
+ mm->lru_gen.memcg = NULL;
+#endif
+}
+
+static inline void lru_gen_use_mm(struct mm_struct *mm)
+{
+ /*
+ * When the bitmap is set, page reclaim knows this mm_struct has been
+ * used since the last time it cleared the bitmap. So it might be worth
+ * walking the page tables of this mm_struct to clear the accessed bit.
+ */
+ WRITE_ONCE(mm->lru_gen.bitmap, -1);
+}
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline void lru_gen_add_mm(struct mm_struct *mm)
+{
+}
+
+static inline void lru_gen_del_mm(struct mm_struct *mm)
+{
+}
+
+#ifdef CONFIG_MEMCG
+static inline void lru_gen_migrate_mm(struct mm_struct *mm)
+{
+}
+#endif
+
+static inline void lru_gen_init_mm(struct mm_struct *mm)
+{
+}
+
+static inline void lru_gen_use_mm(struct mm_struct *mm)
+{
+}
+
+#endif /* CONFIG_LRU_GEN */
+
+struct vma_iterator {
+ struct ma_state mas;
+};
+
+#define VMA_ITERATOR(name, __mm, __addr) \
+ struct vma_iterator name = { \
+ .mas = { \
+ .tree = &(__mm)->mm_mt, \
+ .index = __addr, \
+ .node = MAS_START, \
+ }, \
+ }
+
+static inline void vma_iter_init(struct vma_iterator *vmi,
+ struct mm_struct *mm, unsigned long addr)
+{
+ vmi->mas.tree = &mm->mm_mt;
+ vmi->mas.index = addr;
+ vmi->mas.node = MAS_START;
+}
+
struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index c1bc6731125c..0bb4b6da9993 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -25,18 +25,6 @@
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
- * The per task VMA cache array:
- */
-#define VMACACHE_BITS 2
-#define VMACACHE_SIZE (1U << VMACACHE_BITS)
-#define VMACACHE_MASK (VMACACHE_SIZE - 1)
-
-struct vmacache {
- u64 seqnum;
- struct vm_area_struct *vmas[VMACACHE_SIZE];
-};
-
-/*
* When updating this, please also update struct resident_page_types[] in
* kernel/fork.c
*/
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 355d842d2731..5f74891556f3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -24,10 +24,10 @@
#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
-#ifndef CONFIG_FORCE_MAX_ZONEORDER
+#ifndef CONFIG_ARCH_FORCE_MAX_ORDER
#define MAX_ORDER 11
#else
-#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
+#define MAX_ORDER CONFIG_ARCH_FORCE_MAX_ORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
@@ -121,20 +121,6 @@ static inline bool free_area_empty(struct free_area *area, int migratetype)
struct pglist_data;
-/*
- * Add a wild amount of padding here to ensure data fall into separate
- * cachelines. There are very few zone structures in the machine, so space
- * consumption is not a concern here.
- */
-#if defined(CONFIG_SMP)
-struct zone_padding {
- char x[0];
-} ____cacheline_internodealigned_in_smp;
-#define ZONE_PADDING(name) struct zone_padding name;
-#else
-#define ZONE_PADDING(name)
-#endif
-
#ifdef CONFIG_NUMA
enum numa_stat_item {
NUMA_HIT, /* allocated in intended node */
@@ -222,6 +208,7 @@ enum node_stat_item {
#endif
#ifdef CONFIG_NUMA_BALANCING
PGPROMOTE_SUCCESS, /* promote successfully */
+ PGPROMOTE_CANDIDATE, /* candidate pages to promote */
#endif
NR_VM_NODE_STAT_ITEMS
};
@@ -307,6 +294,8 @@ static inline bool is_active_lru(enum lru_list lru)
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
+#define WORKINGSET_ANON 0
+#define WORKINGSET_FILE 1
#define ANON_AND_FILE 2
enum lruvec_flags {
@@ -315,6 +304,207 @@ enum lruvec_flags {
*/
};
+#endif /* !__GENERATING_BOUNDS_H */
+
+/*
+ * Evictable pages are divided into multiple generations. The youngest and the
+ * oldest generation numbers, max_seq and min_seq, are monotonically increasing.
+ * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
+ * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
+ * corresponding generation. The gen counter in folio->flags stores gen+1 while
+ * a page is on one of lrugen->lists[]. Otherwise it stores 0.
+ *
+ * A page is added to the youngest generation on faulting. The aging needs to
+ * check the accessed bit at least twice before handing this page over to the
+ * eviction. The first check takes care of the accessed bit set on the initial
+ * fault; the second check makes sure this page hasn't been used since then.
+ * This process, AKA second chance, requires a minimum of two generations,
+ * hence MIN_NR_GENS. And to maintain ABI compatibility with the active/inactive
+ * LRU, e.g., /proc/vmstat, these two generations are considered active; the
+ * rest of generations, if they exist, are considered inactive. See
+ * lru_gen_is_active().
+ *
+ * PG_active is always cleared while a page is on one of lrugen->lists[] so that
+ * the aging need not worry about it. And it is set again when a page
+ * considered active is isolated for non-reclaiming purposes, e.g., migration.
+ * See lru_gen_add_folio() and lru_gen_del_folio().
+ *
+ * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the
+ * number of categories of the active/inactive LRU when keeping track of
+ * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits
+ * in folio->flags.
+ */
+#define MIN_NR_GENS 2U
+#define MAX_NR_GENS 4U
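
A minimal sketch of the mapping implied above, assuming the modulo scheme the comment describes (a sequence number selects a list slot modulo MAX_NR_GENS, and the MIN_NR_GENS youngest generations count as active); the helper names are illustrative, not part of this patch:

/* Illustrative sketch only: map a generation sequence number to a list index. */
static inline int gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

/* A generation is "active" if it is one of the MIN_NR_GENS youngest ones. */
static inline bool gen_is_active(unsigned long max_seq, unsigned long seq)
{
	return seq + MIN_NR_GENS > max_seq;
}
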
+
+/*
+ * Each generation is divided into multiple tiers. A page accessed N times
+ * through file descriptors is in tier order_base_2(N). A page in the first tier
+ * (N=0,1) is marked by PG_referenced unless it was faulted in through page
+ * tables or read ahead. A page in any other tier (N>1) is marked by
+ * PG_referenced and PG_workingset. This implies a minimum of two tiers is
+ * supported without using additional bits in folio->flags.
+ *
+ * In contrast to moving across generations which requires the LRU lock, moving
+ * across tiers only involves atomic operations on folio->flags and therefore
+ * has a negligible cost in the buffered access path. In the eviction path,
+ * comparisons of refaulted/(evicted+protected) from the first tier and the
+ * rest infer whether pages accessed multiple times through file descriptors
+ * are statistically hot and thus worth protecting.
+ *
+ * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the
+ * number of categories of the active/inactive LRU when keeping track of
+ * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in
+ * folio->flags.
+ */
+#define MAX_NR_TIERS 4U
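
Read literally, the definition above makes the tier of a page order_base_2() of its file-descriptor access count, capped at the last tier. A hedged sketch with an illustrative helper name (the cap is an assumption for the sketch):

/* Illustrative sketch only: tier of a page accessed N times via file descriptors. */
static inline unsigned int tier_from_refs(unsigned int refs)
{
	/* order_base_2(0) == order_base_2(1) == 0, i.e. the first tier */
	return min_t(unsigned int, order_base_2(refs), MAX_NR_TIERS - 1);
}
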
+
+#ifndef __GENERATING_BOUNDS_H
+
+struct lruvec;
+struct page_vma_mapped_walk;
+
+#define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF)
+#define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF)
+
+#ifdef CONFIG_LRU_GEN
+
+enum {
+ LRU_GEN_ANON,
+ LRU_GEN_FILE,
+};
+
+enum {
+ LRU_GEN_CORE,
+ LRU_GEN_MM_WALK,
+ LRU_GEN_NONLEAF_YOUNG,
+ NR_LRU_GEN_CAPS
+};
+
+#define MIN_LRU_BATCH BITS_PER_LONG
+#define MAX_LRU_BATCH (MIN_LRU_BATCH * 64)
+
+/* whether to keep historical stats from evicted generations */
+#ifdef CONFIG_LRU_GEN_STATS
+#define NR_HIST_GENS MAX_NR_GENS
+#else
+#define NR_HIST_GENS 1U
+#endif
+
+/*
+ * The youngest generation number is stored in max_seq for both anon and file
+ * types as they are aged on an equal footing. The oldest generation numbers are
+ * stored in min_seq[] separately for anon and file types as clean file pages
+ * can be evicted regardless of swap constraints.
+ *
+ * Normally anon and file min_seq are in sync. But if swapping is constrained,
+ * e.g., out of swap space, file min_seq is allowed to advance and leave anon
+ * min_seq behind.
+ *
+ * The number of pages in each generation is eventually consistent and therefore
+ * can be transiently negative when reset_batch_size() is pending.
+ */
+struct lru_gen_struct {
+ /* the aging increments the youngest generation number */
+ unsigned long max_seq;
+ /* the eviction increments the oldest generation numbers */
+ unsigned long min_seq[ANON_AND_FILE];
+ /* the birth time of each generation in jiffies */
+ unsigned long timestamps[MAX_NR_GENS];
+ /* the multi-gen LRU lists, lazily sorted on eviction */
+ struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the multi-gen LRU sizes, eventually consistent */
+ long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the exponential moving average of refaulted */
+ unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS];
+ /* the exponential moving average of evicted+protected */
+ unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS];
+ /* the first tier doesn't need protection, hence the minus one */
+ unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1];
+ /* can be modified without holding the LRU lock */
+ atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS];
+ /* whether the multi-gen LRU is enabled */
+ bool enabled;
+};
+
+enum {
+ MM_LEAF_TOTAL, /* total leaf entries */
+ MM_LEAF_OLD, /* old leaf entries */
+ MM_LEAF_YOUNG, /* young leaf entries */
+ MM_NONLEAF_TOTAL, /* total non-leaf entries */
+ MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */
+ MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */
+ NR_MM_STATS
+};
+
+/* double-buffering Bloom filters */
+#define NR_BLOOM_FILTERS 2
+
+struct lru_gen_mm_state {
+ /* set to max_seq after each iteration */
+ unsigned long seq;
+ /* where the current iteration continues (inclusive) */
+ struct list_head *head;
+ /* where the last iteration ended (exclusive) */
+ struct list_head *tail;
+ /* to wait for the last page table walker to finish */
+ struct wait_queue_head wait;
+ /* Bloom filters flip after each iteration */
+ unsigned long *filters[NR_BLOOM_FILTERS];
+ /* the mm stats for debugging */
+ unsigned long stats[NR_HIST_GENS][NR_MM_STATS];
+ /* the number of concurrent page table walkers */
+ int nr_walkers;
+};
+
+struct lru_gen_mm_walk {
+ /* the lruvec under reclaim */
+ struct lruvec *lruvec;
+ /* unstable max_seq from lru_gen_struct */
+ unsigned long max_seq;
+ /* the next address within an mm to scan */
+ unsigned long next_addr;
+ /* to batch promoted pages */
+ int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* to batch the mm stats */
+ int mm_stats[NR_MM_STATS];
+ /* total batched items */
+ int batched;
+ bool can_swap;
+ bool force_scan;
+};
+
+void lru_gen_init_lruvec(struct lruvec *lruvec);
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw);
+
+#ifdef CONFIG_MEMCG
+void lru_gen_init_memcg(struct mem_cgroup *memcg);
+void lru_gen_exit_memcg(struct mem_cgroup *memcg);
+#endif
+
+#else /* !CONFIG_LRU_GEN */
+
+static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
+{
+}
+
+static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+{
+}
+
+#ifdef CONFIG_MEMCG
+static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
+{
+}
+#endif
+
+#endif /* CONFIG_LRU_GEN */
+
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
/* per lruvec lru_lock for memcg */
@@ -332,6 +522,12 @@ struct lruvec {
unsigned long refaults[ANON_AND_FILE];
/* Various lruvec state flags (enum lruvec_flags) */
unsigned long flags;
+#ifdef CONFIG_LRU_GEN
+ /* evictable pages divided into generations */
+ struct lru_gen_struct lrugen;
+ /* to concurrently iterate lru_gen_mm_list */
+ struct lru_gen_mm_state mm_state;
+#endif
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
@@ -369,13 +565,6 @@ enum zone_watermarks {
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
-/*
- * Shift to encode migratetype and order in the same integer, with order
- * in the least significant bits.
- */
-#define NR_PCP_ORDER_WIDTH 8
-#define NR_PCP_ORDER_MASK ((1<<NR_PCP_ORDER_WIDTH) - 1)
-
#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
@@ -628,7 +817,7 @@ struct zone {
int initialized;
/* Write-intensive fields used from the page allocator */
- ZONE_PADDING(_pad1_)
+ CACHELINE_PADDING(_pad1_);
/* free areas of different sizes */
struct free_area free_area[MAX_ORDER];
@@ -640,7 +829,7 @@ struct zone {
spinlock_t lock;
/* Write-intensive fields used by compaction and vmstats. */
- ZONE_PADDING(_pad2_)
+ CACHELINE_PADDING(_pad2_);
/*
* When free pages are below this point, additional steps are taken
@@ -677,7 +866,7 @@ struct zone {
bool contiguous;
- ZONE_PADDING(_pad3_)
+ CACHELINE_PADDING(_pad3_);
/* Zone statistics */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS];
@@ -747,6 +936,8 @@ static inline bool zone_is_empty(struct zone *zone)
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH)
+#define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH)
+#define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH)
/*
* Define the bit shifts to access each section. For non-existent
@@ -954,8 +1145,10 @@ typedef struct pglist_data {
atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */
unsigned long nr_reclaim_start; /* nr pages written while throttled
* when throttling started. */
- struct task_struct *kswapd; /* Protected by
- mem_hotplug_begin/done() */
+#ifdef CONFIG_MEMORY_HOTPLUG
+ struct mutex kswapd_lock;
+#endif
+ struct task_struct *kswapd; /* Protected by kswapd_lock */
int kswapd_order;
enum zone_type kswapd_highest_zoneidx;
@@ -983,7 +1176,7 @@ typedef struct pglist_data {
#endif /* CONFIG_NUMA */
/* Write-intensive fields used by page reclaim */
- ZONE_PADDING(_pad1_)
+ CACHELINE_PADDING(_pad1_);
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
@@ -997,6 +1190,21 @@ typedef struct pglist_data {
struct deferred_split deferred_split_queue;
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ /* start time in ms of current promote rate limit period */
+ unsigned int nbp_rl_start;
+ /* number of promote candidate pages at start time of current rate limit period */
+ unsigned long nbp_rl_nr_cand;
+ /* promote threshold in ms */
+ unsigned int nbp_threshold;
+ /* start time in ms of current promote threshold adjustment period */
+ unsigned int nbp_th_start;
+ /*
+ * number of promote candidate pages at start time of current promote
+ * threshold adjustment period
+ */
+ unsigned long nbp_th_nr_cand;
+#endif
/* Fields commonly accessed by the page reclaim scanner */
/*
@@ -1008,11 +1216,19 @@ typedef struct pglist_data {
unsigned long flags;
- ZONE_PADDING(_pad2_)
+#ifdef CONFIG_LRU_GEN
+ /* kswap mm walk data */
+ struct lru_gen_mm_walk mm_walk;
+#endif
+
+ CACHELINE_PADDING(_pad2_);
/* Per-node vmstats */
struct per_cpu_nodestat __percpu *per_cpu_nodestats;
atomic_long_t vm_stat[NR_VM_NODE_STAT_ITEMS];
+#ifdef CONFIG_NUMA
+ struct memory_tier __rcu *memtier;
+#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
@@ -1026,11 +1242,6 @@ static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}
-static inline bool pgdat_is_empty(pg_data_t *pgdat)
-{
- return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
-}
-
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
diff --git a/include/linux/node.h b/include/linux/node.h
index 40d641a8bfb0..427a5975cf40 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -2,15 +2,15 @@
/*
* include/linux/node.h - generic node definition
*
- * This is mainly for topological representation. We define the
- * basic 'struct node' here, which can be embedded in per-arch
+ * This is mainly for topological representation. We define the
+ * basic 'struct node' here, which can be embedded in per-arch
* definitions of processors.
*
* Basic handling of the devices is done in drivers/base/node.c
- * and system devices are handled in drivers/base/sys.c.
+ * and system devices are handled in drivers/base/sys.c.
*
* Nodes are exported via driverfs in the class/node/devices/
- * directory.
+ * directory.
*/
#ifndef _LINUX_NODE_H_
#define _LINUX_NODE_H_
@@ -18,7 +18,6 @@
#include <linux/device.h>
#include <linux/cpumask.h>
#include <linux/list.h>
-#include <linux/workqueue.h>
/**
* struct node_hmem_attrs - heterogeneous memory performance attributes
@@ -84,10 +83,6 @@ static inline void node_set_perf_attrs(unsigned int nid,
struct node {
struct device dev;
struct list_head access_list;
-
-#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_HUGETLBFS)
- struct work_struct node_work;
-#endif
#ifdef CONFIG_HMEM_REPORTING
struct list_head cache_attrs;
struct device *cache_dev;
@@ -96,7 +91,6 @@ struct node {
struct memory_block;
extern struct node *node_devices[];
-typedef void (*node_registration_func_t)(struct node *);
#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
@@ -144,11 +138,6 @@ extern void unregister_memory_block_under_nodes(struct memory_block *mem_blk);
extern int register_memory_node_under_compute_node(unsigned int mem_nid,
unsigned int cpu_nid,
unsigned access);
-
-#ifdef CONFIG_HUGETLBFS
-extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
- node_registration_func_t unregister);
-#endif
#else
static inline void node_dev_init(void)
{
@@ -176,18 +165,8 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
static inline void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
{
}
-
-static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
- node_registration_func_t unreg)
-{
-}
#endif
#define to_node(device) container_of(device, struct node, dev)
-static inline bool node_is_toptier(int node)
-{
- return node_state(node, N_CPU);
-}
-
#endif /* _LINUX_NODE_H_ */
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 0c45fb066caa..378956c93c94 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -493,6 +493,7 @@ static inline int num_node_state(enum node_states state)
#define first_online_node 0
#define first_memory_node 0
#define next_online_node(nid) (MAX_NUMNODES)
+#define next_memory_node(nid) (MAX_NUMNODES)
#define nr_node_ids 1U
#define nr_online_nodes 1U
@@ -504,11 +505,20 @@ static inline int num_node_state(enum node_states state)
static inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
- int w, bit = NUMA_NO_NODE;
+ int w, bit;
w = nodes_weight(*maskp);
- if (w)
+ switch (w) {
+ case 0:
+ bit = NUMA_NO_NODE;
+ break;
+ case 1:
+ bit = first_node(*maskp);
+ break;
+ default:
bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_int() % w);
+ break;
+ }
return bit;
#else
return 0;
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 02d1e7bbd8cd..7d0c9c48a0c5 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -78,15 +78,6 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
}
/*
- * Use this helper if tsk->mm != mm and the victim mm needs a special
- * handling. This is guaranteed to stay true after once set.
- */
-static inline bool mm_is_oom_victim(struct mm_struct *mm)
-{
- return test_bit(MMF_OOM_VICTIM, &mm->flags);
-}
-
-/*
* Checks whether a page fault on the given mm is still reliable.
* This is no longer true if the oom reaper started to reap the
* address space which is reflected by MMF_UNSTABLE flag set in
@@ -106,8 +97,6 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
return 0;
}
-bool __oom_reap_task_mm(struct mm_struct *mm);
-
long oom_badness(struct task_struct *p,
unsigned long totalpages);
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index ef1e3e736e14..7d79818dc065 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -55,7 +55,8 @@
#define SECTIONS_WIDTH 0
#endif
-#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_SHIFT \
+ <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH NODES_SHIFT
#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
#error "Vmemmap: No space for nodes field in page flags"
@@ -89,8 +90,8 @@
#define LAST_CPUPID_SHIFT 0
#endif
-#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT \
- <= BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0
@@ -100,10 +101,15 @@
#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
-#if ZONES_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH \
- > BITS_PER_LONG - NR_PAGEFLAGS
+#if ZONES_WIDTH + LRU_GEN_WIDTH + SECTIONS_WIDTH + NODES_WIDTH + \
+ KASAN_TAG_WIDTH + LAST_CPUPID_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error "Not enough bits in page flags"
#endif
+/* see the comment on MAX_NR_TIERS */
+#define LRU_REFS_WIDTH min(__LRU_REFS_WIDTH, BITS_PER_LONG - NR_PAGEFLAGS - \
+ ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \
+ NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH)
+
#endif
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 465ff35a8c00..0b0ae5084e60 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -1058,7 +1058,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
1UL << PG_private | 1UL << PG_private_2 | \
1UL << PG_writeback | 1UL << PG_reserved | \
1UL << PG_slab | 1UL << PG_active | \
- 1UL << PG_unevictable | __PG_MLOCKED)
+ 1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK)
/*
* Flags checked when a page is prepped for return by the page allocator.
@@ -1069,7 +1069,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
* alloc-free cycle to prevent from reusing the page.
*/
#define PAGE_FLAGS_CHECK_AT_PREP \
- (PAGEFLAGS_MASK & ~__PG_HWPOISON)
+ ((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
index 679591301994..c141ea9a95ef 100644
--- a/include/linux/page_counter.h
+++ b/include/linux/page_counter.h
@@ -3,15 +3,17 @@
#define _LINUX_PAGE_COUNTER_H
#include <linux/atomic.h>
+#include <linux/cache.h>
#include <linux/kernel.h>
#include <asm/page.h>
struct page_counter {
+ /*
+ * Make sure 'usage' does not share a cacheline with any other field, as
+ * memcg->memory.usage is a hot member of struct mem_cgroup.
+ */
atomic_long_t usage;
- unsigned long min;
- unsigned long low;
- unsigned long high;
- unsigned long max;
+ CACHELINE_PADDING(_pad1_);
/* effective memory.min and memory.min usage tracking */
unsigned long emin;
@@ -23,18 +25,18 @@ struct page_counter {
atomic_long_t low_usage;
atomic_long_t children_low_usage;
- /* legacy */
unsigned long watermark;
unsigned long failcnt;
- /*
- * 'parent' is placed here to be far from 'usage' to reduce
- * cache false sharing, as 'usage' is written mostly while
- * parent is frequently read for cgroup's hierarchical
- * counting nature.
- */
+ /* Keep all the read-mostly fields in a separate cacheline. */
+ CACHELINE_PADDING(_pad2_);
+
+ unsigned long min;
+ unsigned long low;
+ unsigned long high;
+ unsigned long max;
struct page_counter *parent;
-};
+} ____cacheline_internodealigned_in_smp;
#if BITS_PER_LONG == 32
#define PAGE_COUNTER_MAX LONG_MAX
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index fabb2e1e087f..22be4582faae 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -36,9 +36,15 @@ struct page_ext {
unsigned long flags;
};
+extern bool early_page_ext;
extern unsigned long page_ext_size;
extern void pgdat_page_ext_init(struct pglist_data *pgdat);
+static inline bool early_page_ext_enabled(void)
+{
+ return early_page_ext;
+}
+
#ifdef CONFIG_SPARSEMEM
static inline void page_ext_init_flatmem(void)
{
@@ -55,7 +61,8 @@ static inline void page_ext_init(void)
}
#endif
-struct page_ext *lookup_page_ext(const struct page *page);
+extern struct page_ext *page_ext_get(struct page *page);
+extern void page_ext_put(struct page_ext *page_ext);
static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
@@ -67,13 +74,13 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr)
#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;
-static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
+static inline bool early_page_ext_enabled(void)
{
+ return false;
}
-static inline struct page_ext *lookup_page_ext(const struct page *page)
+static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
- return NULL;
}
static inline void page_ext_init(void)
@@ -87,5 +94,14 @@ static inline void page_ext_init_flatmem_late(void)
static inline void page_ext_init_flatmem(void)
{
}
+
+static inline struct page_ext *page_ext_get(struct page *page)
+{
+ return NULL;
+}
+
+static inline void page_ext_put(struct page_ext *page_ext)
+{
+}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index 4663dfed1293..5cb7bd2078ec 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -13,65 +13,79 @@
* If there is not enough space to store Idle and Young bits in page flags, use
* page ext flags instead.
*/
-
static inline bool folio_test_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(&folio->page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_young;
if (unlikely(!page_ext))
return false;
- return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_young = test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_young;
}
static inline void folio_set_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(&folio->page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
}
static inline bool folio_test_clear_young(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(&folio->page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_young;
if (unlikely(!page_ext))
return false;
- return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_young = test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_young;
}
static inline bool folio_test_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(&folio->page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
+ bool page_idle;
if (unlikely(!page_ext))
return false;
- return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
+
+ return page_idle;
}
static inline void folio_set_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(&folio->page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
}
static inline void folio_clear_idle(struct folio *folio)
{
- struct page_ext *page_ext = lookup_page_ext(&folio->page);
+ struct page_ext *page_ext = page_ext_get(&folio->page);
if (unlikely(!page_ext))
return;
clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
+ page_ext_put(page_ext);
}
#endif /* !CONFIG_64BIT */
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index 83c7248053a1..5f1ae07d724b 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -53,6 +53,10 @@ extern unsigned int pageblock_order;
#endif /* CONFIG_HUGETLB_PAGE */
#define pageblock_nr_pages (1UL << pageblock_order)
+#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
+#define pageblock_aligned(pfn) IS_ALIGNED((pfn), pageblock_nr_pages)
+#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
+#define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages)
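
A small usage sketch of the new helpers, assuming start_pfn and end_pfn are supplied by the caller; the loop body is a placeholder:

/* Illustrative sketch only: walk the range pageblock by pageblock. */
unsigned long pfn, block_end;

for (pfn = pageblock_start_pfn(start_pfn); pfn < end_pfn; pfn = block_end) {
	block_end = pageblock_end_pfn(pfn);
	/* pfn is pageblock-aligned here because the start was rounded down */
	/* process [pfn, min(block_end, end_pfn)) */
}
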
/* Forward declaration */
struct page;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 201dc7281640..bbccb4044222 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -718,8 +718,8 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
pgoff_t end, struct folio_batch *fbatch);
-unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
- unsigned int nr_pages, struct page **pages);
+unsigned filemap_get_folios_contig(struct address_space *mapping,
+ pgoff_t *start, pgoff_t end, struct folio_batch *fbatch);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
struct page **pages);
@@ -989,19 +989,16 @@ static inline int lock_page_killable(struct page *page)
}
/*
- * lock_page_or_retry - Lock the page, unless this would block and the
+ * folio_lock_or_retry - Lock the folio, unless this would block and the
* caller indicated that it can handle a retry.
*
* Return value and mmap_lock implications depend on flags; see
* __folio_lock_or_retry().
*/
-static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
- unsigned int flags)
+static inline bool folio_lock_or_retry(struct folio *folio,
+ struct mm_struct *mm, unsigned int flags)
{
- struct folio *folio;
might_sleep();
-
- folio = page_folio(page);
return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
}
@@ -1042,7 +1039,6 @@ static inline int wait_on_page_locked_killable(struct page *page)
return folio_wait_locked_killable(page_folio(page));
}
-int folio_put_wait_locked(struct folio *folio, int state);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index ac7b38ad5903..f3fafb731ffd 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -15,12 +15,12 @@ struct mm_walk;
* this handler is required to be able to handle
* pmd_trans_huge() pmds. They may simply choose to
* split_huge_page() instead of handling it explicitly.
- * @pte_entry: if set, called for each non-empty PTE (lowest-level)
- * entry
+ * @pte_entry: if set, called for each PTE (lowest-level) entry,
+ * including empty ones
* @pte_hole: if set, called for each hole at all levels,
- * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD
- * 4:PTE. Any folded depths (where PTRS_PER_P?D is equal
- * to 1) are skipped.
+ * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD.
+ * Any folded depths (where PTRS_PER_P?D is equal to 1)
+ * are skipped.
* @hugetlb_entry: if set, called for each hugetlb entry
* @test_walk: caller specific callback function to determine whether
* we walk over the current vma or not. Returning 0 means
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 014ee8f0fbaa..a108b60a6962 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -213,7 +213,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp)
@@ -234,7 +234,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
BUILD_BUG();
return 0;
}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
#endif
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
@@ -260,6 +260,19 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef arch_has_hw_pte_young
+/*
+ * Return whether the accessed bit is supported on the local CPU.
+ *
+ * This stub assumes accessing through an old PTE triggers a page fault.
+ * Architectures that automatically set the accessed bit should override it.
+ */
+static inline bool arch_has_hw_pte_young(void)
+{
+ return false;
+}
+#endif
+
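The stub above returns false, i.e. accessing through an old PTE is assumed to fault. An architecture whose MMU sets the accessed bit in hardware would override the stub along these lines (a sketch of the expected pattern, not a line from this patch):

/* Illustrative sketch only: override in an arch's <asm/pgtable.h>. */
#define arch_has_hw_pte_young arch_has_hw_pte_young
static inline bool arch_has_hw_pte_young(void)
{
	return true;
}
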
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
@@ -1276,8 +1289,7 @@ static inline int pgd_devmap(pgd_t pgd)
#endif
#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
- (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
+ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline int pud_trans_huge(pud_t pud)
{
return 0;
@@ -1598,11 +1610,7 @@ typedef unsigned int pgtbl_mod_mask;
#endif
#ifndef has_transparent_hugepage
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define has_transparent_hugepage() 1
-#else
-#define has_transparent_hugepage() 0
-#endif
+#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
#endif
/*
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b89b4b86951f..bd3504d11b15 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -166,7 +166,7 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
unlink_anon_vmas(next);
}
-struct anon_vma *page_get_anon_vma(struct page *page);
+struct anon_vma *folio_get_anon_vma(struct folio *folio);
/* RMAP flags, currently only relevant for some anon rmap operations. */
typedef int __bitwise rmap_t;
@@ -270,7 +270,7 @@ dup:
* @page: the exclusive anonymous page to try marking possibly shared
*
* The caller needs to hold the PT lock and has to have the page table entry
- * cleared/invalidated+flushed, to properly sync against GUP-fast.
+ * cleared/invalidated.
*
* This is similar to page_try_dup_anon_rmap(), however, not used during fork()
* to duplicate a mapping, but instead to prepare for KSM or temporarily
@@ -286,12 +286,68 @@ static inline int page_try_share_anon_rmap(struct page *page)
{
VM_BUG_ON_PAGE(!PageAnon(page) || !PageAnonExclusive(page), page);
- /* See page_try_dup_anon_rmap(). */
- if (likely(!is_device_private_page(page) &&
- unlikely(page_maybe_dma_pinned(page))))
- return -EBUSY;
+ /* device private pages cannot get pinned via GUP. */
+ if (unlikely(is_device_private_page(page))) {
+ ClearPageAnonExclusive(page);
+ return 0;
+ }
+
+ /*
+ * We have to make sure that when we clear PageAnonExclusive, that
+ * the page is not pinned and that concurrent GUP-fast won't succeed in
+ * concurrently pinning the page.
+ *
+ * Conceptually, PageAnonExclusive clearing consists of:
+ * (A1) Clear PTE
+ * (A2) Check if the page is pinned; back off if so.
+ * (A3) Clear PageAnonExclusive
+ * (A4) Restore PTE (optional, but certainly not writable)
+ *
+ * When clearing PageAnonExclusive, we cannot possibly map the page
+ * writable again, because anon pages that may be shared must never
+ * be writable. So in any case, if the PTE was writable it cannot
+ * be writable anymore afterwards and there would be a PTE change. A
+ * PTE change can be absent only if the PTE wasn't writable to begin with.
+ *
+ * Conceptually, GUP-fast pinning of an anon page consists of:
+ * (B1) Read the PTE
+ * (B2) FOLL_WRITE: check if the PTE is not writable; back off if so.
+ * (B3) Pin the mapped page
+ * (B4) Check if the PTE changed by re-reading it; back off if so.
+ * (B5) If the original PTE is not writable, check if
+ * PageAnonExclusive is not set; back off if so.
+ *
+ * If the PTE was writable, we only have to make sure that GUP-fast
+ * observes a PTE change and properly backs off.
+ *
+ * If the PTE was not writable, we have to make sure that GUP-fast either
+ * detects a (temporary) PTE change or observes that PageAnonExclusive has
+ * been cleared, and properly backs off.
+ *
+ * Consequently, when clearing PageAnonExclusive(), we have to make
+ * sure that (A1), (A2)/(A3) and (A4) happen in the right memory
+ * order. In GUP-fast pinning code, we have to make sure that (B3),(B4)
+ * and (B5) happen in the right memory order.
+ *
+ * We assume that there might not be a memory barrier after
+ * clearing/invalidating the PTE (A1) and before restoring the PTE (A4),
+ * so we use explicit ones here.
+ */
+ /* Paired with the memory barrier in try_grab_folio(). */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb();
+
+ if (unlikely(page_maybe_dma_pinned(page)))
+ return -EBUSY;
ClearPageAnonExclusive(page);
+
+ /*
+ * This is conceptually a smp_wmb() paired with the smp_rmb() in
+ * gup_must_unshare().
+ */
+ if (IS_ENABLED(CONFIG_HAVE_FAST_GUP))
+ smp_mb__after_atomic();
return 0;
}
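
For reference, the reader side described in (B1) through (B5) orders its checks roughly as sketched below; pin_page_fast() and unpin_page() are placeholder names for this sketch, not GUP's actual helpers:

/* Illustrative sketch only: the GUP-fast ordering that pairs with the above. */
pte_t pte = ptep_get_lockless(ptep);			/* (B1) */

if ((flags & FOLL_WRITE) && !pte_write(pte))
	return 0;					/* (B2) */
if (!pin_page_fast(page, flags))			/* (B3) placeholder helper */
	return 0;
if (!pte_same(pte, ptep_get_lockless(ptep))) {
	unpin_page(page);				/* (B4) the PTE changed */
	return 0;
}
/* pairs with smp_mb__after_atomic() in page_try_share_anon_rmap() */
smp_rmb();
if (!pte_write(pte) && !PageAnonExclusive(page)) {
	unpin_page(page);				/* (B5) may no longer be exclusive */
	return 0;
}
return 1;
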
@@ -405,13 +461,8 @@ struct rmap_walk_control {
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
-
-/*
- * Called by memory-failure.c to kill processes.
- */
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
struct rmap_walk_control *rwc);
-void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
#else /* !CONFIG_MMU */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 11a317810566..77f68f8b795c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -14,6 +14,7 @@
#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
+#include <linux/kmsan_types.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
@@ -870,7 +871,6 @@ struct task_struct {
struct mm_struct *active_mm;
/* Per-thread vma caching: */
- struct vmacache vmacache;
#ifdef SPLIT_RSS_COUNTING
struct task_rss_stat rss_stat;
@@ -923,6 +923,10 @@ struct task_struct {
#ifdef CONFIG_MEMCG
unsigned in_user_fault:1;
#endif
+#ifdef CONFIG_LRU_GEN
+ /* whether the LRU algorithm may apply to this access */
+ unsigned in_lru_fault:1;
+#endif
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
@@ -953,6 +957,10 @@ struct task_struct {
#ifdef CONFIG_CPU_SUP_INTEL
unsigned reported_split_lock:1;
#endif
+#ifdef CONFIG_TASK_DELAY_ACCT
+ /* delay due to memory thrashing */
+ unsigned in_thrashing:1;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
@@ -1364,6 +1372,10 @@ struct task_struct {
#endif
#endif
+#ifdef CONFIG_KMSAN
+ struct kmsan_ctx kmsan_ctx;
+#endif
+
#if IS_ENABLED(CONFIG_KUNIT)
struct kunit *kunit_test;
#endif
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index 4d0a5be28b70..8270ad7ae14c 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -71,9 +71,8 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
-#define MMF_OOM_VICTIM 25 /* mm is the oom victim */
-#define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */
-#define MMF_MULTIPROCESS 27 /* mm is shared between processes */
+#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
+#define MMF_MULTIPROCESS 26 /* mm is shared between processes */
/*
* MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either
* replaced in the future by mm.pinned_vm when it becomes stable, or grow into
@@ -81,7 +80,7 @@ static inline int get_dumpable(struct mm_struct *mm)
* pinned pages were unpinned later on, we'll still keep this bit set for the
* lifecycle of this mm, just for simplicity.
*/
-#define MMF_HAS_PINNED 28 /* FOLL_PIN has run, never cleared */
+#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */
#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index e650946816d0..303ee7dd0c7e 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -27,6 +27,7 @@ enum sched_tunable_scaling {
#ifdef CONFIG_NUMA_BALANCING
extern int sysctl_numa_balancing_mode;
+extern unsigned int sysctl_numa_balancing_promote_rate_limit;
#else
#define sysctl_numa_balancing_mode 0
#endif
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index ff0b990de83d..d500ea967dc7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -92,17 +92,19 @@ extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
-extern bool shmem_is_huge(struct vm_area_struct *vma,
- struct inode *inode, pgoff_t index);
-static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
+extern bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
+ pgoff_t index, bool shmem_huge_force);
+static inline bool shmem_huge_enabled(struct vm_area_struct *vma,
+ bool shmem_huge_force)
{
- return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff);
+ return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff,
+ shmem_huge_force);
}
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end);
-/* Flag allocation requirements to shmem_getpage */
+/* Flag allocation requirements to shmem_get_folio */
enum sgp_type {
SGP_READ, /* don't exceed i_size, don't allocate page */
SGP_NOALLOC, /* similar, but fail on hole or use fallocated page */
@@ -111,8 +113,8 @@ enum sgp_type {
SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
-extern int shmem_getpage(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp);
+int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
+ enum sgp_type sgp);
static inline struct page *shmem_read_mapping_page(
struct address_space *mapping, pgoff_t index)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6a613e65e78d..90877fcde70b 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -108,7 +108,7 @@
# define SLAB_ACCOUNT 0
#endif
-#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN 0
@@ -121,6 +121,12 @@
*/
#define SLAB_NO_USER_FLAGS ((slab_flags_t __force)0x10000000U)
+#ifdef CONFIG_KFENCE
+#define SLAB_SKIP_KFENCE ((slab_flags_t __force)0x20000000U)
+#else
+#define SLAB_SKIP_KFENCE 0
+#endif
+
/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index bc2797955de9..9ca7798d7a31 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -14,9 +14,15 @@
#include <linux/gfp.h>
typedef u32 depot_stack_handle_t;
+/*
+ * Number of bits in the handle that stack depot doesn't use. Users may store
+ * information in them.
+ */
+#define STACK_DEPOT_EXTRA_BITS 5
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
+ unsigned int extra_bits,
gfp_t gfp_flags, bool can_alloc);
/*
@@ -59,6 +65,8 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries);
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
+
int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
int spaces);
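
A usage sketch for the extra bits, assuming entries and nr_entries were already collected with stack_trace_save(); the tag value is arbitrary and must fit in STACK_DEPOT_EXTRA_BITS:

/* Illustrative sketch only: attach a small caller tag to a depot handle. */
depot_stack_handle_t handle;
unsigned int tag = 0x11;	/* 5 bits available per STACK_DEPOT_EXTRA_BITS */

handle = __stack_depot_save(entries, nr_entries, tag, GFP_ATOMIC, false);
if (handle)
	pr_debug("extra bits: %u\n", stack_depot_get_extra_bits(handle));
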
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 43150b9bbc5c..a18cf4b7c724 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -162,6 +162,10 @@ union swap_header {
*/
struct reclaim_state {
unsigned long reclaimed_slab;
+#ifdef CONFIG_LRU_GEN
+ /* per-thread mm walk data */
+ struct lru_gen_mm_walk *mm_walk;
+#endif
};
#ifdef __KERNEL__
@@ -351,6 +355,11 @@ static inline swp_entry_t folio_swap_entry(struct folio *folio)
return entry;
}
+static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
+{
+ folio->private = (void *)entry.val;
+}
+
/* linux/mm/workingset.c */
void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
@@ -375,11 +384,11 @@ extern unsigned long totalreserve_pages;
/* linux/mm/swap.c */
-extern void lru_note_cost(struct lruvec *lruvec, bool file,
- unsigned int nr_pages);
-extern void lru_note_cost_folio(struct folio *);
-extern void folio_add_lru(struct folio *);
-extern void lru_cache_add(struct page *);
+void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages);
+void lru_note_cost_folio(struct folio *);
+void folio_add_lru(struct folio *);
+void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
+void lru_cache_add(struct page *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);
@@ -481,7 +490,8 @@ static inline long get_nr_swap_pages(void)
extern void si_swapinfo(struct sysinfo *);
swp_entry_t folio_alloc_swap(struct folio *folio);
-extern void put_swap_page(struct page *page, swp_entry_t entry);
+bool folio_free_swap(struct folio *folio);
+void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
@@ -500,7 +510,6 @@ extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
-extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
@@ -566,7 +575,7 @@ static inline void swap_free(swp_entry_t swp)
{
}
-static inline void put_swap_page(struct page *page, swp_entry_t swp)
+static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
}
@@ -585,11 +594,6 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-static inline int try_to_free_swap(struct page *page)
-{
- return 0;
-}
-
static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
swp_entry_t entry;
@@ -597,6 +601,11 @@ static inline swp_entry_t folio_alloc_swap(struct folio *folio)
return entry;
}
+static inline bool folio_free_swap(struct folio *folio)
+{
+ return false;
+}
+
static inline int add_swap_extent(struct swap_info_struct *sis,
unsigned long start_page,
unsigned long nr_pages, sector_t start_block)
@@ -657,7 +666,7 @@ static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
cgroup_throttle_swaprate(&folio->page, gfp);
}
-#ifdef CONFIG_MEMCG_SWAP
+#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
static inline int mem_cgroup_try_charge_swap(struct folio *folio,
@@ -677,7 +686,7 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p
}
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
-extern bool mem_cgroup_swap_full(struct page *page);
+extern bool mem_cgroup_swap_full(struct folio *folio);
#else
static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
@@ -699,7 +708,7 @@ static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
return get_nr_swap_pages();
}
-static inline bool mem_cgroup_swap_full(struct page *page)
+static inline bool mem_cgroup_swap_full(struct folio *folio)
{
return vm_swap_full();
}
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
index a12dd1c3966c..ae73a87775b3 100644
--- a/include/linux/swap_cgroup.h
+++ b/include/linux/swap_cgroup.h
@@ -4,7 +4,7 @@
#include <linux/swap.h>
-#ifdef CONFIG_MEMCG_SWAP
+#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
unsigned short old, unsigned short new);
@@ -40,6 +40,6 @@ static inline void swap_cgroup_swapoff(int type)
return;
}
-#endif /* CONFIG_MEMCG_SWAP */
+#endif
#endif /* __LINUX_SWAP_CGROUP_H */
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 54078542134c..7ed529a77c5b 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -8,6 +8,11 @@
*/
extern struct swap_info_struct *swap_info[];
extern unsigned long generic_max_swapfile_size(void);
-extern unsigned long max_swapfile_size(void);
+unsigned long arch_max_swapfile_size(void);
+
+/* Maximum swapfile size supported for the arch (not inclusive). */
+extern unsigned long swapfile_maximum_size;
+/* Whether swap migration entry supports storing A/D bits for the arch */
+extern bool swap_migration_ad_supported;
#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index a3d435bf9f97..86b95ccb81bb 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -8,6 +8,10 @@
#ifdef CONFIG_MMU
+#ifdef CONFIG_SWAP
+#include <linux/swapfile.h>
+#endif /* CONFIG_SWAP */
+
/*
* swapcache pages are stored in the swapper_space radix tree. We want to
* get good packing density in that tree, so the index should be dense in
@@ -23,6 +27,45 @@
#define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
+/*
+ * Definitions only for PFN swap entries (see is_pfn_swap_entry()). To
+ * store PFN, we only need SWP_PFN_BITS bits. Each of the pfn swap entries
+ * can use the extra bits to store other information besides PFN.
+ */
+#ifdef MAX_PHYSMEM_BITS
+#define SWP_PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#else /* MAX_PHYSMEM_BITS */
+#define SWP_PFN_BITS (BITS_PER_LONG - PAGE_SHIFT)
+#endif /* MAX_PHYSMEM_BITS */
+#define SWP_PFN_MASK (BIT(SWP_PFN_BITS) - 1)
+
+/**
+ * Migration swap entry specific bitfield definitions. Layout:
+ *
+ * |----------+--------------------|
+ * | swp_type | swp_offset |
+ * |----------+--------+-+-+-------|
+ * | | resv |D|A| PFN |
+ * |----------+--------+-+-+-------|
+ *
+ * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
+ * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
+ *
+ * Note: A/D bits will be stored in migration entries iff there are enough
+ * free bits in the arch-specific swp offset. By default we'll ignore A/D bits
+ * when migrating a page. Please refer to migration_entry_supports_ad()
+ * for more information. If there are more bits besides the PFN and A/D bits,
+ * they should be reserved and always read as zero.
+ */
+#define SWP_MIG_YOUNG_BIT (SWP_PFN_BITS)
+#define SWP_MIG_DIRTY_BIT (SWP_PFN_BITS + 1)
+#define SWP_MIG_TOTAL_BITS (SWP_PFN_BITS + 2)
+
+#define SWP_MIG_YOUNG BIT(SWP_MIG_YOUNG_BIT)
+#define SWP_MIG_DIRTY BIT(SWP_MIG_DIRTY_BIT)
+
+static inline bool is_pfn_swap_entry(swp_entry_t entry);
+
/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
@@ -64,6 +107,17 @@ static inline pgoff_t swp_offset(swp_entry_t entry)
return entry.val & SWP_OFFSET_MASK;
}
+/*
+ * This should only be called upon a pfn swap entry to get the PFN stored
+ * in the swap entry. Please refer to is_pfn_swap_entry() for the definition
+ * of a pfn swap entry.
+ */
+static inline unsigned long swp_offset_pfn(swp_entry_t entry)
+{
+ VM_BUG_ON(!is_pfn_swap_entry(entry));
+ return swp_offset(entry) & SWP_PFN_MASK;
+}
+
/* check whether a pte points to a swap entry */
static inline int is_swap_pte(pte_t pte)
{
@@ -240,6 +294,52 @@ static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
return swp_entry(SWP_MIGRATION_WRITE, offset);
}
+/*
+ * Returns whether the host has a large enough swap offset field to support
+ * carrying over pgtable A/D bits for page migrations. The result is
+ * arch-specific.
+ */
+static inline bool migration_entry_supports_ad(void)
+{
+#ifdef CONFIG_SWAP
+ return swap_migration_ad_supported;
+#else /* CONFIG_SWAP */
+ return false;
+#endif /* CONFIG_SWAP */
+}
+
+static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_entry(swp_type(entry),
+ swp_offset(entry) | SWP_MIG_YOUNG);
+ return entry;
+}
+
+static inline bool is_migration_entry_young(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_YOUNG;
+ /* Keep the old behavior of aging page after migration */
+ return false;
+}
+
+static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_entry(swp_type(entry),
+ swp_offset(entry) | SWP_MIG_DIRTY);
+ return entry;
+}
+
+static inline bool is_migration_entry_dirty(swp_entry_t entry)
+{
+ if (migration_entry_supports_ad())
+ return swp_offset(entry) & SWP_MIG_DIRTY;
+ /* Keep the old behavior of clean page after migration */
+ return false;
+}
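Putting the helpers together, a caller that replaces a present PTE with a migration entry would typically carry the hardware A/D bits over as sketched below (pteval, ptep, mm and address are assumed to be the caller's locals):

/* Illustrative sketch only: preserve young/dirty across migration if supported. */
swp_entry_t entry = make_readable_migration_entry(page_to_pfn(page));

if (pte_young(pteval))
	entry = make_migration_entry_young(entry);
if (pte_dirty(pteval))
	entry = make_migration_entry_dirty(entry);
set_pte_at(mm, address, ptep, swp_entry_to_pte(entry));
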
+
extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
@@ -247,8 +347,8 @@ extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
#ifdef CONFIG_HUGETLB_PAGE
extern void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl);
extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte);
-#endif
-#else
+#endif /* CONFIG_HUGETLB_PAGE */
+#else /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
return swp_entry(0, 0);
@@ -276,7 +376,7 @@ static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
#ifdef CONFIG_HUGETLB_PAGE
static inline void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { }
-#endif
+#endif /* CONFIG_HUGETLB_PAGE */
static inline int is_writable_migration_entry(swp_entry_t entry)
{
return 0;
@@ -286,7 +386,26 @@ static inline int is_readable_migration_entry(swp_entry_t entry)
return 0;
}
-#endif
+static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
+{
+ return entry;
+}
+
+static inline bool is_migration_entry_young(swp_entry_t entry)
+{
+ return false;
+}
+
+static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
+{
+ return entry;
+}
+
+static inline bool is_migration_entry_dirty(swp_entry_t entry)
+{
+ return false;
+}
+#endif /* CONFIG_MIGRATION */
typedef unsigned long pte_marker;
@@ -369,7 +488,7 @@ static inline int pte_none_mostly(pte_t pte)
static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
- struct page *p = pfn_to_page(swp_offset(entry));
+ struct page *p = pfn_to_page(swp_offset_pfn(entry));
/*
* Any use of migration entries may only occur while the
@@ -387,6 +506,9 @@ static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
*/
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
+ /* Make sure the swp offset can always store the needed fields */
+ BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);
+
return is_migration_entry(entry) || is_device_private_entry(entry) ||
is_device_exclusive_entry(entry);
}
@@ -426,7 +548,7 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
{
return is_swap_pmd(pmd) && is_migration_entry(pmd_to_swp_entry(pmd));
}
-#else
+#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
struct page *page)
{
@@ -455,7 +577,7 @@ static inline int is_pmd_migration_entry(pmd_t pmd)
{
return 0;
}
-#endif
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
#ifdef CONFIG_MEMORY_FAILURE
@@ -475,27 +597,17 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
return swp_type(entry) == SWP_HWPOISON;
}
-static inline unsigned long hwpoison_entry_to_pfn(swp_entry_t entry)
-{
- return swp_offset(entry);
-}
-
static inline void num_poisoned_pages_inc(void)
{
atomic_long_inc(&num_poisoned_pages);
}
-static inline void num_poisoned_pages_dec(void)
-{
- atomic_long_dec(&num_poisoned_pages);
-}
-
static inline void num_poisoned_pages_sub(long i)
{
atomic_long_sub(i, &num_poisoned_pages);
}
-#else
+#else /* CONFIG_MEMORY_FAILURE */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
@@ -514,7 +626,7 @@ static inline void num_poisoned_pages_inc(void)
static inline void num_poisoned_pages_sub(long i)
{
}
-#endif
+#endif /* CONFIG_MEMORY_FAILURE */
static inline int non_swap_entry(swp_entry_t entry)
{
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 47e5d374c7eb..afb18f198843 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -58,20 +58,28 @@
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
- instrument_copy_from_user(to, from, n);
+ unsigned long res;
+
+ instrument_copy_from_user_before(to, from, n);
check_object_size(to, n, false);
- return raw_copy_from_user(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
+ return res;
}
static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res;
+
might_fault();
+ instrument_copy_from_user_before(to, from, n);
if (should_fail_usercopy())
return n;
- instrument_copy_from_user(to, from, n);
check_object_size(to, n, false);
- return raw_copy_from_user(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
+ return res;
}
/**
@@ -115,8 +123,9 @@ _copy_from_user(void *to, const void __user *from, unsigned long n)
unsigned long res = n;
might_fault();
if (!should_fail_usercopy() && likely(access_ok(from, n))) {
- instrument_copy_from_user(to, from, n);
+ instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index e1b8a915e9e9..f07e6998bb68 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -175,9 +175,8 @@ extern bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
-extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct list_head *uf);
+extern int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
struct list_head *uf);
@@ -258,7 +257,7 @@ static inline bool userfaultfd_remove(struct vm_area_struct *vma,
return true;
}
-static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
+static inline int userfaultfd_unmap_prep(struct mm_struct *mm,
unsigned long start, unsigned long end,
struct list_head *uf)
{
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index f3fc36cd2276..3518dba1e02f 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -129,10 +129,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
NR_TLB_LOCAL_FLUSH_ALL,
NR_TLB_LOCAL_FLUSH_ONE,
#endif /* CONFIG_DEBUG_TLBFLUSH */
-#ifdef CONFIG_DEBUG_VM_VMACACHE
- VMACACHE_FIND_CALLS,
- VMACACHE_FIND_HITS,
-#endif
#ifdef CONFIG_SWAP
SWAP_RA,
SWAP_RA_HIT,
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
deleted file mode 100644
index 6fce268a4588..000000000000
--- a/include/linux/vmacache.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_VMACACHE_H
-#define __LINUX_VMACACHE_H
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-
-static inline void vmacache_flush(struct task_struct *tsk)
-{
- memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
-}
-
-extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
-extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
- unsigned long addr);
-
-#ifndef CONFIG_MMU
-extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
-#endif
-
-static inline void vmacache_invalidate(struct mm_struct *mm)
-{
- mm->vmacache_seqnum++;
-}
-
-#endif /* __LINUX_VMACACHE_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index bfe38869498d..19cf5b6892ce 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -125,12 +125,6 @@ static inline void vm_events_fold_cpu(int cpu)
#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
#endif
-#ifdef CONFIG_DEBUG_VM_VMACACHE
-#define count_vm_vmacache_event(x) count_vm_event(x)
-#else
-#define count_vm_vmacache_event(x) do {} while (0)
-#endif
-
#define __count_zid_vm_events(item, zid, delta) \
__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 3f045f6d6c4f..06f9291b6fd5 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -17,20 +17,12 @@ struct bio;
DECLARE_PER_CPU(int, dirty_throttle_leaks);
/*
- * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
- *
- * (thresh - thresh/DIRTY_FULL_SCOPE, thresh)
- *
- * Further beyond, all dirtier tasks will enter a loop waiting (possibly long
- * time) for the dirty pages to drop, unless written enough pages.
- *
* The global dirty threshold is normally equal to the global dirty limit,
* except when the system suddenly allocates a lot of anonymous memory and
* knocks down the global dirty threshold quickly, in which case the global
* dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
*/
#define DIRTY_SCOPE 8
-#define DIRTY_FULL_SCOPE (DIRTY_SCOPE / 2)
struct backing_dev_info;
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index d651f3437367..935af4947917 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -11,11 +11,14 @@
EM( SCAN_FAIL, "failed") \
EM( SCAN_SUCCEED, "succeeded") \
EM( SCAN_PMD_NULL, "pmd_null") \
+ EM( SCAN_PMD_NONE, "pmd_none") \
+ EM( SCAN_PMD_MAPPED, "page_pmd_mapped") \
EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \
EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \
EM( SCAN_EXCEED_SHARED_PTE, "exceed_shared_pte") \
EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \
EM( SCAN_PTE_UFFD_WP, "pte_uffd_wp") \
+ EM( SCAN_PTE_MAPPED_HUGEPAGE, "pte_mapped_hugepage") \
EM( SCAN_PAGE_RO, "no_writable_page") \
EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \
EM( SCAN_PAGE_NULL, "page_null") \
@@ -166,5 +169,39 @@ TRACE_EVENT(mm_collapse_huge_page_swapin,
__entry->ret)
);
+TRACE_EVENT(mm_khugepaged_scan_file,
+
+ TP_PROTO(struct mm_struct *mm, struct page *page, const char *filename,
+ int present, int swap, int result),
+
+ TP_ARGS(mm, page, filename, present, swap, result),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, pfn)
+ __string(filename, filename)
+ __field(int, present)
+ __field(int, swap)
+ __field(int, result)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->pfn = page ? page_to_pfn(page) : -1;
+ __assign_str(filename, filename);
+ __entry->present = present;
+ __entry->swap = swap;
+ __entry->result = result;
+ ),
+
+ TP_printk("mm=%p, scan_pfn=0x%lx, filename=%s, present=%d, swap=%d, result=%s",
+ __entry->mm,
+ __entry->pfn,
+ __get_str(filename),
+ __entry->present,
+ __entry->swap,
+ __print_symbolic(__entry->result, SCAN_STATUS))
+);
+
#endif /* __HUGE_MEMORY_H */
#include <trace/define_trace.h>
diff --git a/include/trace/events/maple_tree.h b/include/trace/events/maple_tree.h
new file mode 100644
index 000000000000..2be403bdc2bd
--- /dev/null
+++ b/include/trace/events/maple_tree.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM maple_tree
+
+#if !defined(_TRACE_MM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MM_H
+
+
+#include <linux/tracepoint.h>
+
+struct ma_state;
+
+TRACE_EVENT(ma_op,
+
+ TP_PROTO(const char *fn, struct ma_state *mas),
+
+ TP_ARGS(fn, mas),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(unsigned long, min)
+ __field(unsigned long, max)
+ __field(unsigned long, index)
+ __field(unsigned long, last)
+ __field(void *, node)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->min = mas->min;
+ __entry->max = mas->max;
+ __entry->index = mas->index;
+ __entry->last = mas->last;
+ __entry->node = mas->node;
+ ),
+
+ TP_printk("%s\tNode: %p (%lu %lu) range: %lu-%lu",
+ __entry->fn,
+ (void *) __entry->node,
+ (unsigned long) __entry->min,
+ (unsigned long) __entry->max,
+ (unsigned long) __entry->index,
+ (unsigned long) __entry->last
+ )
+)
+TRACE_EVENT(ma_read,
+
+ TP_PROTO(const char *fn, struct ma_state *mas),
+
+ TP_ARGS(fn, mas),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(unsigned long, min)
+ __field(unsigned long, max)
+ __field(unsigned long, index)
+ __field(unsigned long, last)
+ __field(void *, node)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->min = mas->min;
+ __entry->max = mas->max;
+ __entry->index = mas->index;
+ __entry->last = mas->last;
+ __entry->node = mas->node;
+ ),
+
+ TP_printk("%s\tNode: %p (%lu %lu) range: %lu-%lu",
+ __entry->fn,
+ (void *) __entry->node,
+ (unsigned long) __entry->min,
+ (unsigned long) __entry->max,
+ (unsigned long) __entry->index,
+ (unsigned long) __entry->last
+ )
+)
+
+TRACE_EVENT(ma_write,
+
+ TP_PROTO(const char *fn, struct ma_state *mas, unsigned long piv,
+ void *val),
+
+ TP_ARGS(fn, mas, piv, val),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(unsigned long, min)
+ __field(unsigned long, max)
+ __field(unsigned long, index)
+ __field(unsigned long, last)
+ __field(unsigned long, piv)
+ __field(void *, val)
+ __field(void *, node)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->min = mas->min;
+ __entry->max = mas->max;
+ __entry->index = mas->index;
+ __entry->last = mas->last;
+ __entry->piv = piv;
+ __entry->val = val;
+ __entry->node = mas->node;
+ ),
+
+ TP_printk("%s\tNode %p (%lu %lu) range:%lu-%lu piv (%lu) val %p",
+ __entry->fn,
+ (void *) __entry->node,
+ (unsigned long) __entry->min,
+ (unsigned long) __entry->max,
+ (unsigned long) __entry->index,
+ (unsigned long) __entry->last,
+ (unsigned long) __entry->piv,
+ (void *) __entry->val
+ )
+)
+#endif /* _TRACE_MM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h
index 4661f7ba07c0..216de5f03621 100644
--- a/include/trace/events/mmap.h
+++ b/include/trace/events/mmap.h
@@ -42,6 +42,79 @@ TRACE_EVENT(vm_unmapped_area,
__entry->low_limit, __entry->high_limit, __entry->align_mask,
__entry->align_offset)
);
+
+TRACE_EVENT(vma_mas_szero,
+ TP_PROTO(struct maple_tree *mt, unsigned long start,
+ unsigned long end),
+
+ TP_ARGS(mt, start, end),
+
+ TP_STRUCT__entry(
+ __field(struct maple_tree *, mt)
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ ),
+
+ TP_fast_assign(
+ __entry->mt = mt;
+ __entry->start = start;
+ __entry->end = end;
+ ),
+
+ TP_printk("mt_mod %p, (NULL), SNULL, %lu, %lu,",
+ __entry->mt,
+ (unsigned long) __entry->start,
+ (unsigned long) __entry->end
+ )
+);
+
+TRACE_EVENT(vma_store,
+ TP_PROTO(struct maple_tree *mt, struct vm_area_struct *vma),
+
+ TP_ARGS(mt, vma),
+
+ TP_STRUCT__entry(
+ __field(struct maple_tree *, mt)
+ __field(struct vm_area_struct *, vma)
+ __field(unsigned long, vm_start)
+ __field(unsigned long, vm_end)
+ ),
+
+ TP_fast_assign(
+ __entry->mt = mt;
+ __entry->vma = vma;
+ __entry->vm_start = vma->vm_start;
+ __entry->vm_end = vma->vm_end - 1;
+ ),
+
+ TP_printk("mt_mod %p, (%p), STORE, %lu, %lu,",
+ __entry->mt, __entry->vma,
+ (unsigned long) __entry->vm_start,
+ (unsigned long) __entry->vm_end
+ )
+);
+
+
+TRACE_EVENT(exit_mmap,
+ TP_PROTO(struct mm_struct *mm),
+
+ TP_ARGS(mm),
+
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(struct maple_tree *, mt)
+ ),
+
+ TP_fast_assign(
+ __entry->mm = mm;
+ __entry->mt = &mm->mm_mt;
+ ),
+
+ TP_printk("mt_mod %p, DESTROY\n",
+ __entry->mt
+ )
+);
+
#endif
/* This part must be outside protection */
diff --git a/include/uapi/asm-generic/hugetlb_encode.h b/include/uapi/asm-generic/hugetlb_encode.h
index 4f3d5aaa11f5..de687009bfe5 100644
--- a/include/uapi/asm-generic/hugetlb_encode.h
+++ b/include/uapi/asm-generic/hugetlb_encode.h
@@ -20,18 +20,18 @@
#define HUGETLB_FLAG_ENCODE_SHIFT 26
#define HUGETLB_FLAG_ENCODE_MASK 0x3f
-#define HUGETLB_FLAG_ENCODE_16KB (14 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_64KB (16 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_512KB (19 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_32MB (25 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_512MB (29 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16KB (14U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_64KB (16U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512KB (19U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1MB (20U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2MB (21U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_8MB (23U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16MB (24U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB (25U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_256MB (28U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB (29U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1GB (30U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2GB (31U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16GB (34U << HUGETLB_FLAG_ENCODE_SHIFT)
#endif /* _ASM_GENERIC_HUGETLB_ENCODE_H_ */
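For context, the U suffixes above matter because 34 << 26 == 2281701376 no longer fits in a signed 32-bit int, so the signed shift would overflow. A minimal standalone sketch (not part of the patch) of the well-defined unsigned form:

	/* 34 << 26 would overflow a signed 32-bit int; 34U << 26 is evaluated
	 * in unsigned arithmetic and is well defined. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t encode_16gb = 34U << 26;	/* HUGETLB_FLAG_ENCODE_16GB */

		printf("0x%x\n", encode_16gb);		/* prints 0x88000000 */
		return 0;
	}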
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index 6c1aa92a92e4..6ce1f1ceb432 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -77,6 +77,8 @@
#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
+#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 7d32b1e797fb..005e5e306266 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -12,6 +12,10 @@
#include <linux/types.h>
+/* ioctls for /dev/userfaultfd */
+#define USERFAULTFD_IOC 0xAA
+#define USERFAULTFD_IOC_NEW _IO(USERFAULTFD_IOC, 0x00)
+
/*
* If the UFFDIO_API is upgraded someday, the UFFDIO_UNREGISTER and
* UFFDIO_WAKE ioctls should be defined as _IOW and not as _IOR. In
diff --git a/init/Kconfig b/init/Kconfig
index 751ca3dd1d22..d6c4302b1c08 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -969,11 +969,6 @@ config MEMCG
help
Provides control over the memory footprint of tasks in a cgroup.
-config MEMCG_SWAP
- bool
- depends on MEMCG && SWAP
- default y
-
config MEMCG_KMEM
bool
depends on MEMCG && !SLOB
diff --git a/init/main.c b/init/main.c
index 0866e5d0d467..61d735ba2ffb 100644
--- a/init/main.c
+++ b/init/main.c
@@ -34,6 +34,7 @@
#include <linux/percpu.h>
#include <linux/kmod.h>
#include <linux/kprobes.h>
+#include <linux/kmsan.h>
#include <linux/vmalloc.h>
#include <linux/kernel_stat.h>
#include <linux/start_kernel.h>
@@ -117,6 +118,7 @@ static int kernel_init(void *);
extern void init_IRQ(void);
extern void radix_tree_init(void);
+extern void maple_tree_init(void);
/*
* Debug helper: via this flag we know that we are in 'early bootup code'
@@ -836,6 +838,7 @@ static void __init mm_init(void)
init_mem_debugging_and_hardening();
kfence_alloc_pool();
report_meminit();
+ kmsan_init_shadow();
stack_depot_early_init();
mem_init();
mem_init_print_info();
@@ -849,10 +852,14 @@ static void __init mm_init(void)
pgtable_init();
debug_objects_mem_init();
vmalloc_init();
+ /* Should be run after vmap initialization */
+ if (early_page_ext_enabled())
+ page_ext_init();
/* Should be run before the first non-init thread is created */
init_espfix_bsp();
/* Should be run after espfix64 is set up. */
pti_init();
+ kmsan_init_runtime();
}
#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
@@ -1005,6 +1012,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
"Interrupts were enabled *very* early, fixing it\n"))
local_irq_disable();
radix_tree_init();
+ maple_tree_init();
/*
* Set up housekeeping before setting up workqueues to allow the unbound
@@ -1617,7 +1625,8 @@ static noinline void __init kernel_init_freeable(void)
padata_init();
page_alloc_init_late();
/* Initialize page ext after all struct pages are initialized. */
- page_ext_init();
+ if (!early_page_ext_enabled())
+ page_ext_init();
do_basic_setup();
diff --git a/ipc/shm.c b/ipc/shm.c
index b3048ebd5c31..7d86f058fb86 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1721,7 +1721,7 @@ long ksys_shmdt(char __user *shmaddr)
#ifdef CONFIG_MMU
loff_t size = 0;
struct file *file;
- struct vm_area_struct *next;
+ VMA_ITERATOR(vmi, mm, addr);
#endif
if (addr & ~PAGE_MASK)
@@ -1751,12 +1751,9 @@ long ksys_shmdt(char __user *shmaddr)
* match the usual checks anyway. So assume all vma's are
* above the starting address given.
*/
- vma = find_vma(mm, addr);
#ifdef CONFIG_MMU
- while (vma) {
- next = vma->vm_next;
-
+ for_each_vma(vmi, vma) {
/*
* Check if the starting address would match, i.e. it's
* a fragment created by mprotect() and/or munmap(), or it
@@ -1774,6 +1771,7 @@ long ksys_shmdt(char __user *shmaddr)
file = vma->vm_file;
size = i_size_read(file_inode(vma->vm_file));
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
+ mas_pause(&vmi.mas);
/*
* We discovered the size of the shm segment, so
* break out of here and fall through to the next
@@ -1781,10 +1779,9 @@ long ksys_shmdt(char __user *shmaddr)
* searching for matching vma's.
*/
retval = 0;
- vma = next;
+ vma = vma_next(&vmi);
break;
}
- vma = next;
}
/*
@@ -1794,17 +1791,19 @@ long ksys_shmdt(char __user *shmaddr)
*/
size = PAGE_ALIGN(size);
while (vma && (loff_t)(vma->vm_end - addr) <= size) {
- next = vma->vm_next;
-
/* finding a matching vma now does not alter retval */
if ((vma->vm_ops == &shm_vm_ops) &&
((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
- (vma->vm_file == file))
+ (vma->vm_file == file)) {
do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
- vma = next;
+ mas_pause(&vmi.mas);
+ }
+
+ vma = vma_next(&vmi);
}
#else /* CONFIG_MMU */
+ vma = vma_lookup(mm, addr);
/* under NOMMU conditions, the exact address to be destroyed must be
* given
*/
diff --git a/kernel/Makefile b/kernel/Makefile
index 318789c728d3..d754e0be1176 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -38,6 +38,7 @@ KCOV_INSTRUMENT_kcov.o := n
KASAN_SANITIZE_kcov.o := n
KCSAN_SANITIZE_kcov.o := n
UBSAN_SANITIZE_kcov.o := n
+KMSAN_SANITIZE_kcov.o := n
CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack) -fno-stack-protector
# Don't instrument error handlers
diff --git a/kernel/acct.c b/kernel/acct.c
index 13706356ec54..62200d799b9b 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -555,15 +555,14 @@ void acct_collect(long exitcode, int group_dead)
unsigned long vsize = 0;
if (group_dead && current->mm) {
+ struct mm_struct *mm = current->mm;
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
- mmap_read_lock(current->mm);
- vma = current->mm->mmap;
- while (vma) {
+ mmap_read_lock(mm);
+ for_each_vma(vmi, vma)
vsize += vma->vm_end - vma->vm_start;
- vma = vma->vm_next;
- }
- mmap_read_unlock(current->mm);
+ mmap_read_unlock(mm);
}
spin_lock_irq(&current->sighand->siglock);
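The same mechanical conversion recurs throughout this series: a linked-list walk over mm->mmap / vm_next becomes a maple tree iteration. A hedged sketch of the pattern (count_mm_vmas() is a hypothetical helper, not part of the patch):

	static unsigned long count_mm_vmas(struct mm_struct *mm)
	{
		VMA_ITERATOR(vmi, mm, 0);	/* start the walk at address 0 */
		struct vm_area_struct *vma;
		unsigned long nr = 0;

		mmap_read_lock(mm);
		for_each_vma(vmi, vma)	/* was: for (vma = mm->mmap; vma; vma = vma->vm_next) */
			nr++;
		mmap_read_unlock(mm);

		return nr;
	}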
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 9795d75b09b2..b529182e8b04 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -22,6 +22,13 @@ int main(void)
DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
#endif
DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
+#ifdef CONFIG_LRU_GEN
+ DEFINE(LRU_GEN_WIDTH, order_base_2(MAX_NR_GENS + 1));
+ DEFINE(__LRU_REFS_WIDTH, MAX_NR_TIERS - 2);
+#else
+ DEFINE(LRU_GEN_WIDTH, 0);
+ DEFINE(__LRU_REFS_WIDTH, 0);
+#endif
/* End of constants */
return 0;
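For reference, assuming the MGLRU defaults of MAX_NR_GENS == 4 and MAX_NR_TIERS == 4, this reserves order_base_2(4 + 1) == 3 page-flag bits for LRU_GEN_WIDTH and 4 - 2 == 2 bits for __LRU_REFS_WIDTH when CONFIG_LRU_GEN is enabled, and zero bits otherwise.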
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 711fd293b6de..59cf4dc728a5 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2007,7 +2007,7 @@ out:
static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
{ \
u64 stack[stack_size / sizeof(u64)]; \
- u64 regs[MAX_BPF_EXT_REG]; \
+ u64 regs[MAX_BPF_EXT_REG] = {}; \
\
FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
ARG1 = (u64) (unsigned long) ctx; \
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index 67e03e1833ba..c2a2182ce570 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -445,8 +445,8 @@ struct bpf_iter_seq_task_vma_info {
};
enum bpf_task_vma_iter_find_op {
- task_vma_iter_first_vma, /* use mm->mmap */
- task_vma_iter_next_vma, /* use curr_vma->vm_next */
+ task_vma_iter_first_vma, /* use find_vma() with addr 0 */
+ task_vma_iter_next_vma, /* use vma_next() with curr_vma */
task_vma_iter_find_vma, /* use find_vma() to find next vma */
};
@@ -544,10 +544,10 @@ again:
switch (op) {
case task_vma_iter_first_vma:
- curr_vma = curr_task->mm->mmap;
+ curr_vma = find_vma(curr_task->mm, 0);
break;
case task_vma_iter_next_vma:
- curr_vma = curr_vma->vm_next;
+ curr_vma = find_vma(curr_task->mm, curr_vma->vm_end);
break;
case task_vma_iter_find_vma:
/* We dropped mmap_lock so it is necessary to use find_vma
@@ -561,7 +561,7 @@ again:
if (curr_vma &&
curr_vma->vm_start == info->prev_vm_start &&
curr_vma->vm_end == info->prev_vm_end)
- curr_vma = curr_vma->vm_next;
+ curr_vma = find_vma(curr_task->mm, curr_vma->vm_end);
break;
}
if (!curr_vma) {
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 2c7ecca226be..fd4020835ec6 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -164,7 +164,6 @@ struct cgroup_mgctx {
#define DEFINE_CGROUP_MGCTX(name) \
struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
-extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 7beceb447211..d5e9ccde3ab8 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -50,7 +50,6 @@
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
-#include <linux/vmacache.h>
#include <linux/rcupdate.h>
#include <linux/irq.h>
#include <linux/security.h>
@@ -283,17 +282,6 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
if (!CACHE_FLUSH_IS_SAFE)
return;
- if (current->mm) {
- int i;
-
- for (i = 0; i < VMACACHE_SIZE; i++) {
- if (!current->vmacache.vmas[i])
- continue;
- flush_cache_range(current->vmacache.vmas[i],
- addr, addr + BREAK_INSTR_SIZE);
- }
- }
-
/* Force flush instruction cache if it was outside the mm */
flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 164ed9ef77a3..e39cb696cfbd 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -214,13 +214,22 @@ void __delayacct_freepages_end(void)
&current->delays->freepages_count);
}
-void __delayacct_thrashing_start(void)
+void __delayacct_thrashing_start(bool *in_thrashing)
{
+ *in_thrashing = !!current->in_thrashing;
+ if (*in_thrashing)
+ return;
+
+ current->in_thrashing = 1;
current->delays->thrashing_start = local_clock();
}
-void __delayacct_thrashing_end(void)
+void __delayacct_thrashing_end(bool *in_thrashing)
{
+ if (*in_thrashing)
+ return;
+
+ current->in_thrashing = 0;
delayacct_end(&current->delays->lock,
&current->delays->thrashing_start,
&current->delays->thrashing_delay,
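A hedged caller-side sketch (modelled on how the delayacct_thrashing_start()/_end() wrappers are used elsewhere in this series; the surrounding function is illustrative only). The local bool lets a nested stall in the same task be accounted only once:

	static void wait_for_thrashed_page_example(void)
	{
		bool in_thrashing;

		delayacct_thrashing_start(&in_thrashing);
		/* ... block until the thrashed page becomes available ... */
		delayacct_thrashing_end(&in_thrashing);
	}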
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 27f272381cf2..33437d620644 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -10,6 +10,7 @@
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
+#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -156,6 +157,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
else
addr = ops->map_page(dev, page, offset, size, dir, attrs);
+ kmsan_handle_dma(page, offset, size, dir);
debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);
return addr;
@@ -194,11 +196,13 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
else
ents = ops->map_sg(dev, sg, nents, dir, attrs);
- if (ents > 0)
+ if (ents > 0) {
+ kmsan_handle_dma_sg(sg, nents, dir);
debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
- else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
- ents != -EIO && ents != -EREMOTEIO))
+ } else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
+ ents != -EIO && ents != -EREMOTEIO)) {
return -EIO;
+ }
return ents;
}
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 063068a9ea9b..846add8394c4 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -5,6 +5,7 @@
#include <linux/resume_user_mode.h>
#include <linux/highmem.h>
#include <linux/jump_label.h>
+#include <linux/kmsan.h>
#include <linux/livepatch.h>
#include <linux/audit.h>
#include <linux/tick.h>
@@ -24,6 +25,7 @@ static __always_inline void __enter_from_user_mode(struct pt_regs *regs)
user_exit_irqoff();
instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
trace_hardirqs_off_finish();
instrumentation_end();
}
@@ -352,6 +354,7 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
lockdep_hardirqs_off(CALLER_ADDR0);
ct_irq_enter();
instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
trace_hardirqs_off_finish();
instrumentation_end();
@@ -367,6 +370,7 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
*/
lockdep_hardirqs_off(CALLER_ADDR0);
instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
rcu_irq_enter_check_tick();
trace_hardirqs_off_finish();
instrumentation_end();
@@ -452,6 +456,7 @@ irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
ct_nmi_enter();
instrumentation_begin();
+ kmsan_unpoison_entry_regs(regs);
trace_hardirqs_off_finish();
ftrace_nmi_enter();
instrumentation_end();
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 43b0df997d13..aefc1e08e015 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10270,8 +10270,9 @@ static void perf_addr_filter_apply(struct perf_addr_filter *filter,
struct perf_addr_filter_range *fr)
{
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
if (!vma->vm_file)
continue;
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 2eaa327f8158..d9e357b7e17c 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -19,7 +19,7 @@
#include <linux/export.h>
#include <linux/rmap.h> /* anon_vma_prepare */
#include <linux/mmu_notifier.h> /* set_pte_at_notify */
-#include <linux/swap.h> /* try_to_free_swap */
+#include <linux/swap.h> /* folio_free_swap */
#include <linux/ptrace.h> /* user_enable_single_step */
#include <linux/kdebug.h> /* notifier mechanism */
#include "../../mm/internal.h" /* munlock_vma_page */
@@ -154,8 +154,10 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
struct page *old_page, struct page *new_page)
{
+ struct folio *old_folio = page_folio(old_page);
+ struct folio *new_folio;
struct mm_struct *mm = vma->vm_mm;
- DEFINE_FOLIO_VMA_WALK(pvmw, page_folio(old_page), vma, addr, 0);
+ DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
int err;
struct mmu_notifier_range range;
@@ -163,14 +165,14 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
addr + PAGE_SIZE);
if (new_page) {
- err = mem_cgroup_charge(page_folio(new_page), vma->vm_mm,
- GFP_KERNEL);
+ new_folio = page_folio(new_page);
+ err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
if (err)
return err;
}
- /* For try_to_free_swap() below */
- lock_page(old_page);
+ /* For folio_free_swap() below */
+ folio_lock(old_folio);
mmu_notifier_invalidate_range_start(&range);
err = -EAGAIN;
@@ -179,14 +181,14 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
if (new_page) {
- get_page(new_page);
+ folio_get(new_folio);
page_add_new_anon_rmap(new_page, vma, addr);
- lru_cache_add_inactive_or_unevictable(new_page, vma);
+ folio_add_lru_vma(new_folio, vma);
} else
/* no new page, just dec_mm_counter for old_page */
dec_mm_counter(mm, MM_ANONPAGES);
- if (!PageAnon(old_page)) {
+ if (!folio_test_anon(old_folio)) {
dec_mm_counter(mm, mm_counter_file(old_page));
inc_mm_counter(mm, MM_ANONPAGES);
}
@@ -198,15 +200,15 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
mk_pte(new_page, vma->vm_page_prot));
page_remove_rmap(old_page, vma, false);
- if (!page_mapped(old_page))
- try_to_free_swap(old_page);
+ if (!folio_mapped(old_folio))
+ folio_free_swap(old_folio);
page_vma_mapped_walk_done(&pvmw);
- put_page(old_page);
+ folio_put(old_folio);
err = 0;
unlock:
mmu_notifier_invalidate_range_end(&range);
- unlock_page(old_page);
+ folio_unlock(old_folio);
return err;
}
@@ -349,9 +351,10 @@ static bool valid_ref_ctr_vma(struct uprobe *uprobe,
static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *tmp;
- for (tmp = mm->mmap; tmp; tmp = tmp->vm_next)
+ for_each_vma(vmi, tmp)
if (valid_ref_ctr_vma(uprobe, tmp))
return tmp;
@@ -552,7 +555,7 @@ put_old:
/* try collapse pmd for compound page */
if (!ret && orig_page_huge)
- collapse_pte_mapped_thp(mm, vaddr);
+ collapse_pte_mapped_thp(mm, vaddr, false);
return ret;
}
@@ -1231,11 +1234,12 @@ int uprobe_apply(struct inode *inode, loff_t offset,
static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
int err = 0;
mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
unsigned long vaddr;
loff_t offset;
@@ -1983,9 +1987,10 @@ bool uprobe_deny_signal(void)
static void mmf_recalc_uprobes(struct mm_struct *mm)
{
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
if (!valid_vma(vma, false))
continue;
/*
diff --git a/kernel/exit.c b/kernel/exit.c
index 4b8e7c94a3c0..98c859ffa728 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -60,6 +60,7 @@
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
+#include <linux/kmsan.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>
@@ -466,6 +467,7 @@ assign_new_owner:
goto retry;
}
WRITE_ONCE(mm->owner, c);
+ lru_gen_migrate_mm(mm);
task_unlock(c);
put_task_struct(c);
}
@@ -759,6 +761,7 @@ void __noreturn do_exit(long code)
WARN_ON(tsk->plug);
kcov_task_exit(tsk);
+ kmsan_task_exit(tsk);
coredump_task_exit(tsk);
ptrace_event(PTRACE_EVENT_EXIT, code);
diff --git a/kernel/fork.c b/kernel/fork.c
index 947eb1a6399a..5f986a32d0e6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -37,13 +37,13 @@
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
+#include <linux/kmsan.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
-#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
@@ -475,7 +475,6 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
*/
*new = data_race(*orig);
INIT_LIST_HEAD(&new->anon_vma_chain);
- new->vm_next = new->vm_prev = NULL;
dup_anon_vma_name(orig, new);
}
return new;
@@ -580,11 +579,12 @@ static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
static __latent_entropy int dup_mmap(struct mm_struct *mm,
struct mm_struct *oldmm)
{
- struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
- struct rb_node **rb_link, *rb_parent;
+ struct vm_area_struct *mpnt, *tmp;
int retval;
- unsigned long charge;
+ unsigned long charge = 0;
LIST_HEAD(uf);
+ MA_STATE(old_mas, &oldmm->mm_mt, 0, 0);
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
uprobe_start_dup_mmap();
if (mmap_write_lock_killable(oldmm)) {
@@ -606,16 +606,16 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
mm->exec_vm = oldmm->exec_vm;
mm->stack_vm = oldmm->stack_vm;
- rb_link = &mm->mm_rb.rb_node;
- rb_parent = NULL;
- pprev = &mm->mmap;
retval = ksm_fork(mm, oldmm);
if (retval)
goto out;
khugepaged_fork(mm, oldmm);
- prev = NULL;
- for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
+ retval = mas_expected_entries(&mas, oldmm->map_count);
+ if (retval)
+ goto out;
+
+ mas_for_each(&old_mas, mpnt, ULONG_MAX) {
struct file *file;
if (mpnt->vm_flags & VM_DONTCOPY) {
@@ -629,7 +629,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
*/
if (fatal_signal_pending(current)) {
retval = -EINTR;
- goto out;
+ goto loop_out;
}
if (mpnt->vm_flags & VM_ACCOUNT) {
unsigned long len = vma_pages(mpnt);
@@ -675,24 +675,17 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
}
/*
- * Clear hugetlb-related page reserves for children. This only
- * affects MAP_PRIVATE mappings. Faults generated by the child
- * are not guaranteed to succeed, even if read-only
+ * Copy/update hugetlb private vma information.
*/
if (is_vm_hugetlb_page(tmp))
- reset_vma_resv_huge_pages(tmp);
-
- /*
- * Link in the new vma and copy the page table entries.
- */
- *pprev = tmp;
- pprev = &tmp->vm_next;
- tmp->vm_prev = prev;
- prev = tmp;
+ hugetlb_dup_vma_private(tmp);
- __vma_link_rb(mm, tmp, rb_link, rb_parent);
- rb_link = &tmp->vm_rb.rb_right;
- rb_parent = &tmp->vm_rb;
+ /* Link the vma into the MT */
+ mas.index = tmp->vm_start;
+ mas.last = tmp->vm_end - 1;
+ mas_store(&mas, tmp);
+ if (mas_is_err(&mas))
+ goto fail_nomem_mas_store;
mm->map_count++;
if (!(tmp->vm_flags & VM_WIPEONFORK))
@@ -702,10 +695,12 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
tmp->vm_ops->open(tmp);
if (retval)
- goto out;
+ goto loop_out;
}
/* a new mm has just been created */
retval = arch_dup_mmap(oldmm, mm);
+loop_out:
+ mas_destroy(&mas);
out:
mmap_write_unlock(mm);
flush_tlb_mm(oldmm);
@@ -714,6 +709,9 @@ out:
fail_uprobe_end:
uprobe_end_dup_mmap();
return retval;
+
+fail_nomem_mas_store:
+ unlink_anon_vmas(tmp);
fail_nomem_anon_vma_fork:
mpol_put(vma_policy(tmp));
fail_nomem_policy:
@@ -721,7 +719,7 @@ fail_nomem_policy:
fail_nomem:
retval = -ENOMEM;
vm_unacct_memory(charge);
- goto out;
+ goto loop_out;
}
static inline int mm_alloc_pgd(struct mm_struct *mm)
@@ -1026,6 +1024,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->worker_private = NULL;
kcov_task_init(tsk);
+ kmsan_task_create(tsk);
kmap_local_fork(tsk);
#ifdef CONFIG_FAULT_INJECTION
@@ -1109,9 +1108,8 @@ static void mm_init_uprobes_state(struct mm_struct *mm)
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
struct user_namespace *user_ns)
{
- mm->mmap = NULL;
- mm->mm_rb = RB_ROOT;
- mm->vmacache_seqnum = 0;
+ mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
+ mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
atomic_set(&mm->mm_users, 1);
atomic_set(&mm->mm_count, 1);
seqcount_init(&mm->write_protect_seq);
@@ -1152,6 +1150,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
goto fail_nocontext;
mm->user_ns = get_user_ns(user_ns);
+ lru_gen_init_mm(mm);
return mm;
fail_nocontext:
@@ -1194,6 +1193,7 @@ static inline void __mmput(struct mm_struct *mm)
}
if (mm->binfmt)
module_put(mm->binfmt->module);
+ lru_gen_del_mm(mm);
mmdrop(mm);
}
@@ -1285,13 +1285,16 @@ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
/* Forbid mm->exe_file change if old file still mapped. */
old_exe_file = get_mm_exe_file(mm);
if (old_exe_file) {
+ VMA_ITERATOR(vmi, mm, 0);
mmap_read_lock(mm);
- for (vma = mm->mmap; vma && !ret; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
if (!vma->vm_file)
continue;
if (path_equal(&vma->vm_file->f_path,
- &old_exe_file->f_path))
+ &old_exe_file->f_path)) {
ret = -EBUSY;
+ break;
+ }
}
mmap_read_unlock(mm);
fput(old_exe_file);
@@ -1566,9 +1569,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
if (!oldmm)
return 0;
- /* initialize the new vmacache entries */
- vmacache_flush(tsk);
-
if (clone_flags & CLONE_VM) {
mmget(oldmm);
mm = oldmm;
@@ -2693,6 +2693,13 @@ pid_t kernel_clone(struct kernel_clone_args *args)
get_task_struct(p);
}
+ if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
+ /* lock the task to synchronize with memcg migration */
+ task_lock(p);
+ lru_gen_add_mm(p->mm);
+ task_unlock(p);
+ }
+
wake_up_new_task(p);
/* forking complete and child started to run, tell ptracer */
diff --git a/kernel/kcov.c b/kernel/kcov.c
index e19c84b02452..e5cd09fd8a05 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/init.h>
+#include <linux/kmsan-checks.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
@@ -152,6 +153,12 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
INIT_LIST_HEAD(&area->list);
area->size = size;
list_add(&area->list, &kcov_remote_areas);
+ /*
+ * KMSAN doesn't instrument this file, so it may not know area->list
+ * is initialized. Unpoison it explicitly to avoid reports in
+ * kcov_remote_area_get().
+ */
+ kmsan_unpoison_memory(&area->list, sizeof(area->list));
}
static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile
index d51cabf28f38..ea925731fa40 100644
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
@@ -5,8 +5,9 @@ KCOV_INSTRUMENT := n
obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
-# Avoid recursion lockdep -> KCSAN -> ... -> lockdep.
+# Avoid recursion lockdep -> sanitizer -> ... -> lockdep.
KCSAN_SANITIZE_lockdep.o := n
+KMSAN_SANITIZE_lockdep.o := n
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f4d02201f424..e4ce124ec701 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4389,6 +4389,17 @@ void set_numabalancing_state(bool enabled)
}
#ifdef CONFIG_PROC_SYSCTL
+static void reset_memory_tiering(void)
+{
+ struct pglist_data *pgdat;
+
+ for_each_online_pgdat(pgdat) {
+ pgdat->nbp_threshold = 0;
+ pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
+ pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
+ }
+}
+
int sysctl_numa_balancing(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
@@ -4405,6 +4416,9 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
if (err < 0)
return err;
if (write) {
+ if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
+ (state & NUMA_BALANCING_MEMORY_TIERING))
+ reset_memory_tiering();
sysctl_numa_balancing_mode = state;
__set_numabalancing_state(state);
}
@@ -5159,6 +5173,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
* finish_task_switch()'s mmdrop().
*/
switch_mm_irqs_off(prev->active_mm, next->mm, next);
+ lru_gen_use_mm(next->mm);
if (!prev->mm) { // from kernel
/* will mmdrop() in finish_task_switch(). */
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 667876da8382..1637b65ba07a 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -333,6 +333,7 @@ static __init int sched_init_debug(void)
debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
+ debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5ffec4370602..e4a0b8bd941c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -40,6 +40,7 @@
#include <linux/cpuidle.h>
#include <linux/interrupt.h>
+#include <linux/memory-tiers.h>
#include <linux/mempolicy.h>
#include <linux/mutex_api.h>
#include <linux/profile.h>
@@ -1090,6 +1091,12 @@ unsigned int sysctl_numa_balancing_scan_size = 256;
/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;
+/* The page with hint page fault latency < threshold (in ms) is considered hot */
+unsigned int sysctl_numa_balancing_hot_threshold = MSEC_PER_SEC;
+
+/* Restrict the NUMA promotion throughput (MB/s) for each target node. */
+unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
+
struct numa_group {
refcount_t refcount;
@@ -1432,6 +1439,120 @@ static inline unsigned long group_weight(struct task_struct *p, int nid,
return 1000 * faults / total_faults;
}
+/*
+ * If memory tiering mode is enabled, cpupid of slow memory page is
+ * used to record scan time instead of CPU and PID. When tiering mode
+ * is disabled at run time, the scan time (in cpupid) will be
+ * interpreted as CPU and PID. So the CPU needs to be checked to
+ * avoid out-of-bounds array access.
+ */
+static inline bool cpupid_valid(int cpupid)
+{
+ return cpupid_to_cpu(cpupid) < nr_cpu_ids;
+}
+
+/*
+ * For memory tiering mode, if the fast memory node has enough free
+ * pages (more than the "enough" watermark defined here), all recently
+ * accessed slow memory pages will be migrated to the fast memory node
+ * without considering the hot threshold, so as to take full advantage
+ * of the fast memory capacity.
+ */
+static bool pgdat_free_space_enough(struct pglist_data *pgdat)
+{
+ int z;
+ unsigned long enough_wmark;
+
+ enough_wmark = max(1UL * 1024 * 1024 * 1024 >> PAGE_SHIFT,
+ pgdat->node_present_pages >> 4);
+ for (z = pgdat->nr_zones - 1; z >= 0; z--) {
+ struct zone *zone = pgdat->node_zones + z;
+
+ if (!populated_zone(zone))
+ continue;
+
+ if (zone_watermark_ok(zone, 0,
+ wmark_pages(zone, WMARK_PROMO) + enough_wmark,
+ ZONE_MOVABLE, 0))
+ return true;
+ }
+ return false;
+}
+
+/*
+ * For memory tiering mode, when page tables are scanned, the scan
+ * time is recorded in struct page, in addition to making the slow
+ * memory page PROT_NONE. So when the page is accessed, the hint page
+ * fault handler calculates the hint page fault latency as
+ *
+ * hint page fault latency = hint page fault time - scan time
+ *
+ * The smaller the hint page fault latency, the more likely the page
+ * is hot.
+ */
+static int numa_hint_fault_latency(struct page *page)
+{
+ int last_time, time;
+
+ time = jiffies_to_msecs(jiffies);
+ last_time = xchg_page_access_time(page, time);
+
+ return (time - last_time) & PAGE_ACCESS_TIME_MASK;
+}
+
+/*
+ * For memory tiering mode, too high promotion/demotion throughput may
+ * hurt application latency. So we provide a mechanism to rate limit
+ * the number of pages that we try to promote.
+ */
+static bool numa_promotion_rate_limit(struct pglist_data *pgdat,
+ unsigned long rate_limit, int nr)
+{
+ unsigned long nr_cand;
+ unsigned int now, start;
+
+ now = jiffies_to_msecs(jiffies);
+ mod_node_page_state(pgdat, PGPROMOTE_CANDIDATE, nr);
+ nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
+ start = pgdat->nbp_rl_start;
+ if (now - start > MSEC_PER_SEC &&
+ cmpxchg(&pgdat->nbp_rl_start, start, now) == start)
+ pgdat->nbp_rl_nr_cand = nr_cand;
+ if (nr_cand - pgdat->nbp_rl_nr_cand >= rate_limit)
+ return true;
+ return false;
+}
+
+#define NUMA_MIGRATION_ADJUST_STEPS 16
+
+static void numa_promotion_adjust_threshold(struct pglist_data *pgdat,
+ unsigned long rate_limit,
+ unsigned int ref_th)
+{
+ unsigned int now, start, th_period, unit_th, th;
+ unsigned long nr_cand, ref_cand, diff_cand;
+
+ now = jiffies_to_msecs(jiffies);
+ th_period = sysctl_numa_balancing_scan_period_max;
+ start = pgdat->nbp_th_start;
+ if (now - start > th_period &&
+ cmpxchg(&pgdat->nbp_th_start, start, now) == start) {
+ ref_cand = rate_limit *
+ sysctl_numa_balancing_scan_period_max / MSEC_PER_SEC;
+ nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
+ diff_cand = nr_cand - pgdat->nbp_th_nr_cand;
+ unit_th = ref_th * 2 / NUMA_MIGRATION_ADJUST_STEPS;
+ th = pgdat->nbp_threshold ? : ref_th;
+ if (diff_cand > ref_cand * 11 / 10)
+ th = max(th - unit_th, unit_th);
+ else if (diff_cand < ref_cand * 9 / 10)
+ th = min(th + unit_th, ref_th * 2);
+ pgdat->nbp_th_nr_cand = nr_cand;
+ pgdat->nbp_threshold = th;
+ }
+}
+
bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
int src_nid, int dst_cpu)
{
@@ -1439,9 +1560,44 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
int dst_nid = cpu_to_node(dst_cpu);
int last_cpupid, this_cpupid;
+ /*
+ * The pages in slow memory node should be migrated according
+ * to hot/cold instead of private/shared.
+ */
+ if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
+ !node_is_toptier(src_nid)) {
+ struct pglist_data *pgdat;
+ unsigned long rate_limit;
+ unsigned int latency, th, def_th;
+
+ pgdat = NODE_DATA(dst_nid);
+ if (pgdat_free_space_enough(pgdat)) {
+ /* workload changed, reset hot threshold */
+ pgdat->nbp_threshold = 0;
+ return true;
+ }
+
+ def_th = sysctl_numa_balancing_hot_threshold;
+ rate_limit = sysctl_numa_balancing_promote_rate_limit << \
+ (20 - PAGE_SHIFT);
+ numa_promotion_adjust_threshold(pgdat, rate_limit, def_th);
+
+ th = pgdat->nbp_threshold ? : def_th;
+ latency = numa_hint_fault_latency(page);
+ if (latency >= th)
+ return false;
+
+ return !numa_promotion_rate_limit(pgdat, rate_limit,
+ thp_nr_pages(page));
+ }
+
this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+ if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
+ !node_is_toptier(src_nid) && !cpupid_valid(last_cpupid))
+ return false;
+
/*
* Allow first faults or private faults to migrate immediately early in
* the lifetime of a task. The magic number 4 is based on waiting for
@@ -2681,6 +2837,15 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
if (!p->mm)
return;
+ /*
+ * NUMA faults statistics are unnecessary for the slow memory
+ * node for memory tiering mode.
+ */
+ if (!node_is_toptier(mem_node) &&
+ (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING ||
+ !cpupid_valid(last_cpupid)))
+ return;
+
/* Allocate buffer to track faults on a per-node basis */
if (unlikely(!p->numa_faults)) {
int size = sizeof(*p->numa_faults) *
@@ -2761,6 +2926,7 @@ static void task_numa_work(struct callback_head *work)
struct task_struct *p = current;
struct mm_struct *mm = p->mm;
u64 runtime = p->se.sum_exec_runtime;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
struct vm_area_struct *vma;
unsigned long start, end;
unsigned long nr_pte_updates = 0;
@@ -2817,13 +2983,16 @@ static void task_numa_work(struct callback_head *work)
if (!mmap_read_trylock(mm))
return;
- vma = find_vma(mm, start);
+ mas_set(&mas, start);
+ vma = mas_find(&mas, ULONG_MAX);
if (!vma) {
reset_ptenuma_scan(p);
start = 0;
- vma = mm->mmap;
+ mas_set(&mas, start);
+ vma = mas_find(&mas, ULONG_MAX);
}
- for (; vma; vma = vma->vm_next) {
+
+ for (; vma; vma = mas_find(&mas, ULONG_MAX)) {
if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
continue;
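Worked example for the rate limit above (assuming 4KiB pages, i.e. PAGE_SHIFT == 12): the sysctl is expressed in MB/s, so the shift by (20 - PAGE_SHIFT) == 8 converts it to pages per second. The default of 65536 MB/s therefore permits up to 65536 << 8 == 16,777,216 candidate pages per one-second window before numa_promotion_rate_limit() starts rejecting promotions.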
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1fc198be1ffd..1644242ecd11 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2446,6 +2446,7 @@ extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
+extern unsigned int sysctl_numa_balancing_hot_threshold;
#endif
#ifdef CONFIG_SCHED_HRTICK
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 82ab288758f5..188c305aeb8b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1643,6 +1643,14 @@ static struct ctl_table kern_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_FOUR,
},
+ {
+ .procname = "numa_balancing_promote_rate_limit_MBps",
+ .data = &sysctl_numa_balancing_promote_rate_limit,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
#endif /* CONFIG_NUMA_BALANCING */
{
.procname = "panic",
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3761118d1879..73178b0e43a4 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -817,13 +817,12 @@ config DEBUG_VM
If unsure, say N.
-config DEBUG_VM_VMACACHE
- bool "Debug VMA caching"
+config DEBUG_VM_MAPLE_TREE
+ bool "Debug VM maple trees"
depends on DEBUG_VM
+ select DEBUG_MAPLE_TREE
help
- Enable this to turn on VMA caching debug information. Doing so
- can cause significant overhead, so only enable it in non-production
- environments.
+ Enable VM maple tree debugging information and extra validations.
If unsure, say N.
@@ -976,6 +975,7 @@ config DEBUG_STACKOVERFLOW
source "lib/Kconfig.kasan"
source "lib/Kconfig.kfence"
+source "lib/Kconfig.kmsan"
endmenu # "Memory Debugging"
@@ -1640,6 +1640,14 @@ config BUG_ON_DATA_CORRUPTION
If unsure, say N.
+config DEBUG_MAPLE_TREE
+ bool "Debug maple trees"
+ depends on DEBUG_KERNEL
+ help
+ Enable maple tree debugging information and extra validations.
+
+ If unsure, say N.
+
endmenu
config DEBUG_CREDENTIALS
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index f0973da583e0..ca09b1cf8ee9 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -167,14 +167,6 @@ config KASAN_STACK
as well, as it adds inline-style instrumentation that is run
unconditionally.
-config KASAN_TAGS_IDENTIFY
- bool "Memory corruption type identification"
- depends on KASAN_SW_TAGS || KASAN_HW_TAGS
- help
- Enables best-effort identification of the bug types (use-after-free
- or out-of-bounds) at the cost of increased memory consumption.
- Only applicable for the tag-based KASAN modes.
-
config KASAN_VMALLOC
bool "Check accesses to vmalloc allocations"
depends on HAVE_ARCH_KASAN_VMALLOC
diff --git a/lib/Kconfig.kmsan b/lib/Kconfig.kmsan
new file mode 100644
index 000000000000..b2489dd6503f
--- /dev/null
+++ b/lib/Kconfig.kmsan
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config HAVE_ARCH_KMSAN
+ bool
+
+config HAVE_KMSAN_COMPILER
+ # Clang versions <14.0.0 also support -fsanitize=kernel-memory, but not
+ # all the features necessary to build the kernel with KMSAN.
+ depends on CC_IS_CLANG && CLANG_VERSION >= 140000
+ def_bool $(cc-option,-fsanitize=kernel-memory -mllvm -msan-disable-checks=1)
+
+config KMSAN
+ bool "KMSAN: detector of uninitialized values use"
+ depends on HAVE_ARCH_KMSAN && HAVE_KMSAN_COMPILER
+ depends on SLUB && DEBUG_KERNEL && !KASAN && !KCSAN
+ select STACKDEPOT
+ select STACKDEPOT_ALWAYS_INIT
+ help
+ KernelMemorySanitizer (KMSAN) is a dynamic detector of uses of
+ uninitialized values in the kernel. It is based on compiler
+ instrumentation provided by Clang and thus requires Clang to build.
+
+ An important note is that KMSAN is not intended for production use,
+ because it drastically increases kernel memory footprint and slows
+ the whole system down.
+
+ See <file:Documentation/dev-tools/kmsan.rst> for more details.
+
+if KMSAN
+
+config HAVE_KMSAN_PARAM_RETVAL
+ # -fsanitize-memory-param-retval is supported only by Clang >= 14.
+ depends on HAVE_KMSAN_COMPILER
+ def_bool $(cc-option,-fsanitize=kernel-memory -fsanitize-memory-param-retval)
+
+config KMSAN_CHECK_PARAM_RETVAL
+ bool "Check for uninitialized values passed to and returned from functions"
+ default y
+ depends on HAVE_KMSAN_PARAM_RETVAL
+ help
+ If the compiler supports -fsanitize-memory-param-retval, KMSAN will
+ eagerly check every function parameter passed by value and every
+ function return value.
+
+ Disabling KMSAN_CHECK_PARAM_RETVAL will result in tracking shadow for
+ function parameters and return values across function borders. This
+ is a more relaxed mode, but it generates more instrumentation code and
+ may potentially report errors in corner cases when non-instrumented
+ functions call instrumented ones.
+
+config KMSAN_KUNIT_TEST
+ tristate "KMSAN integration test suite" if !KUNIT_ALL_TESTS
+ default KUNIT_ALL_TESTS
+ depends on TRACEPOINTS && KUNIT
+ help
+ Test suite for KMSAN, testing various error detection scenarios,
+ and checking that reports are correctly output to console.
+
+ Say Y here if you want the test to be built into the kernel and run
+ during boot; say M if you want the test to build as a module; say N
+ if you are unsure.
+
+endif
diff --git a/lib/Makefile b/lib/Makefile
index ad570b7699ba..161d6a724ff7 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,7 @@ endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
- idr.o extable.o irq_regs.o argv_split.o \
+ maple_tree.o idr.o extable.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
@@ -65,11 +65,6 @@ obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_SIPHASH) += test_siphash.o
obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
-obj-$(CONFIG_KASAN_KUNIT_TEST) += test_kasan.o
-CFLAGS_test_kasan.o += -fno-builtin
-CFLAGS_test_kasan.o += $(call cc-disable-warning, vla)
-obj-$(CONFIG_KASAN_MODULE_TEST) += test_kasan_module.o
-CFLAGS_test_kasan_module.o += -fno-builtin
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
UBSAN_SANITIZE_test_ubsan.o := y
@@ -275,6 +270,9 @@ obj-$(CONFIG_POLYNOMIAL) += polynomial.o
CFLAGS_stackdepot.o += -fno-builtin
obj-$(CONFIG_STACKDEPOT) += stackdepot.o
KASAN_SANITIZE_stackdepot.o := n
+# In particular, instrumenting stackdepot.c with KMSAN will result in infinite
+# recursion.
+KMSAN_SANITIZE_stackdepot.o := n
KCOV_INSTRUMENT_stackdepot.o := n
obj-$(CONFIG_REF_TRACKER) += ref_tracker.o
diff --git a/lib/iomap.c b/lib/iomap.c
index fbaa3e8f19d6..4f8b31baa575 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -6,6 +6,7 @@
*/
#include <linux/pci.h>
#include <linux/io.h>
+#include <linux/kmsan-checks.h>
#include <linux/export.h>
@@ -70,26 +71,35 @@ static void bad_io_access(unsigned long port, const char *access)
#define mmio_read64be(addr) swab64(readq(addr))
#endif
+/*
+ * Here and below, we apply __no_kmsan_checks to functions reading data from
+ * hardware, to ensure that KMSAN marks their return values as initialized.
+ */
+__no_kmsan_checks
unsigned int ioread8(const void __iomem *addr)
{
IO_COND(addr, return inb(port), return readb(addr));
return 0xff;
}
+__no_kmsan_checks
unsigned int ioread16(const void __iomem *addr)
{
IO_COND(addr, return inw(port), return readw(addr));
return 0xffff;
}
+__no_kmsan_checks
unsigned int ioread16be(const void __iomem *addr)
{
IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
return 0xffff;
}
+__no_kmsan_checks
unsigned int ioread32(const void __iomem *addr)
{
IO_COND(addr, return inl(port), return readl(addr));
return 0xffffffff;
}
+__no_kmsan_checks
unsigned int ioread32be(const void __iomem *addr)
{
IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
@@ -142,18 +152,21 @@ static u64 pio_read64be_hi_lo(unsigned long port)
return lo | (hi << 32);
}
+__no_kmsan_checks
u64 ioread64_lo_hi(const void __iomem *addr)
{
IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr));
return 0xffffffffffffffffULL;
}
+__no_kmsan_checks
u64 ioread64_hi_lo(const void __iomem *addr)
{
IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr));
return 0xffffffffffffffffULL;
}
+__no_kmsan_checks
u64 ioread64be_lo_hi(const void __iomem *addr)
{
IO_COND(addr, return pio_read64be_lo_hi(port),
@@ -161,6 +174,7 @@ u64 ioread64be_lo_hi(const void __iomem *addr)
return 0xffffffffffffffffULL;
}
+__no_kmsan_checks
u64 ioread64be_hi_lo(const void __iomem *addr)
{
IO_COND(addr, return pio_read64be_hi_lo(port),
@@ -188,22 +202,32 @@ EXPORT_SYMBOL(ioread64be_hi_lo);
void iowrite8(u8 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, outb(val,port), writeb(val, addr));
}
void iowrite16(u16 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, outw(val,port), writew(val, addr));
}
void iowrite16be(u16 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
}
void iowrite32(u32 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, outl(val,port), writel(val, addr));
}
void iowrite32be(u32 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
}
EXPORT_SYMBOL(iowrite8);
@@ -239,24 +263,32 @@ static void pio_write64be_hi_lo(u64 val, unsigned long port)
void iowrite64_lo_hi(u64 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write64_lo_hi(val, port),
writeq(val, addr));
}
void iowrite64_hi_lo(u64 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write64_hi_lo(val, port),
writeq(val, addr));
}
void iowrite64be_lo_hi(u64 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write64be_lo_hi(val, port),
mmio_write64be(val, addr));
}
void iowrite64be_hi_lo(u64 val, void __iomem *addr)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(&val, sizeof(val));
IO_COND(addr, pio_write64be_hi_lo(val, port),
mmio_write64be(val, addr));
}
@@ -328,14 +360,20 @@ static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insb(port,dst,count), mmio_insb(addr, dst, count));
+ /* KMSAN must treat values read from devices as initialized. */
+ kmsan_unpoison_memory(dst, count);
}
void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insw(port,dst,count), mmio_insw(addr, dst, count));
+ /* KMSAN must treat values read from devices as initialized. */
+ kmsan_unpoison_memory(dst, count * 2);
}
void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count)
{
IO_COND(addr, insl(port,dst,count), mmio_insl(addr, dst, count));
+ /* KMSAN must treat values read from devices as initialized. */
+ kmsan_unpoison_memory(dst, count * 4);
}
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
@@ -343,14 +381,20 @@ EXPORT_SYMBOL(ioread32_rep);
void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(src, count);
IO_COND(addr, outsb(port, src, count), mmio_outsb(addr, src, count));
}
void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(src, count * 2);
IO_COND(addr, outsw(port, src, count), mmio_outsw(addr, src, count));
}
void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
{
+ /* Make sure uninitialized memory isn't copied to devices. */
+ kmsan_check_memory(src, count * 4);
IO_COND(addr, outsl(port, src,count), mmio_outsl(addr, src, count));
}
EXPORT_SYMBOL(iowrite8_rep);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 4b7fce72e3e5..c3ca28ca68a6 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -174,13 +174,16 @@ static int copyout(void __user *to, const void *from, size_t n)
static int copyin(void *to, const void __user *from, size_t n)
{
+ size_t res = n;
+
if (should_fail_usercopy())
return n;
if (access_ok(from, n)) {
- instrument_copy_from_user(to, from, n);
- n = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_before(to, from, n);
+ res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
}
- return n;
+ return res;
}
static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
new file mode 100644
index 000000000000..e1743803c851
--- /dev/null
+++ b/lib/maple_tree.c
@@ -0,0 +1,7130 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Maple Tree implementation
+ * Copyright (c) 2018-2022 Oracle Corporation
+ * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
+ * Matthew Wilcox <willy@infradead.org>
+ */
+
+/*
+ * DOC: Interesting implementation details of the Maple Tree
+ *
+ * Each node type has a number of slots for entries and a number of slots for
+ * pivots. In the case of dense nodes, the pivots are implied by the position
+ * and are simply the slot index + the minimum of the node.
+ *
+ * In regular B-Tree terms, pivots are called keys. The term pivot is used to
+ * indicate that the tree is specifying ranges. Pivots may appear in the
+ * subtree with an entry attached to the value, whereas keys are unique to a
+ * specific position of a B-tree. Pivot values are inclusive of the slot with
+ * the same index.
+ *
+ *
+ * The following illustrates the layout of a range64 node's slots and pivots.
+ *
+ *
+ * Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
+ * ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬
+ * │ │ │ │ │ │ │ │ └─ Implied maximum
+ * │ │ │ │ │ │ │ └─ Pivot 14
+ * │ │ │ │ │ │ └─ Pivot 13
+ * │ │ │ │ │ └─ Pivot 12
+ * │ │ │ │ └─ Pivot 11
+ * │ │ │ └─ Pivot 2
+ * │ │ └─ Pivot 1
+ * │ └─ Pivot 0
+ * └─ Implied minimum
+ *
+ * Slot contents:
+ * Internal (non-leaf) nodes contain pointers to other nodes.
+ * Leaf nodes contain entries.
+ *
+ * The location of interest is often referred to as an offset. All offsets have
+ * a slot, but the last offset has an implied pivot from the node above (or
+ * ULONG_MAX for the root node).
+ *
+ * Ranges complicate certain write activities. When modifying any of
+ * the B-tree variants, it is known that one entry will either be added or
+ * deleted. When modifying the Maple Tree, one store operation may overwrite
+ * the entire data set, or one half of the tree, or the middle half of the tree.
+ *
+ */
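+
+/*
+ * Illustrative example (editorial addition, not from the original patch):
+ * assuming a single entry "A" stored over the range 5-10 in an otherwise
+ * empty range64 leaf whose implied minimum is 0, the layout described above
+ * would look roughly like:
+ *
+ *   slot[0] = NULL, pivot[0] = 4    (0..4 holds nothing)
+ *   slot[1] = A,    pivot[1] = 10   (5..10 maps to A; pivots are inclusive)
+ *   slot[2] = NULL, pivot[2] = 0    (end of data; the remainder is covered
+ *                                    by the implied maximum)
+ */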
+
+
+#include <linux/maple_tree.h>
+#include <linux/xarray.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/limits.h>
+#include <asm/barrier.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/maple_tree.h>
+
+#define MA_ROOT_PARENT 1
+
+/*
+ * Maple state flags
+ * * MA_STATE_BULK - Bulk insert mode
+ * * MA_STATE_REBALANCE - Indicate a rebalance during bulk insert
+ * * MA_STATE_PREALLOC - Preallocated nodes, WARN_ON allocation
+ */
+#define MA_STATE_BULK 1
+#define MA_STATE_REBALANCE 2
+#define MA_STATE_PREALLOC 4
+
+#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
+#define ma_mnode_ptr(x) ((struct maple_node *)(x))
+#define ma_enode_ptr(x) ((struct maple_enode *)(x))
+static struct kmem_cache *maple_node_cache;
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+static const unsigned long mt_max[] = {
+ [maple_dense] = MAPLE_NODE_SLOTS,
+ [maple_leaf_64] = ULONG_MAX,
+ [maple_range_64] = ULONG_MAX,
+ [maple_arange_64] = ULONG_MAX,
+};
+#define mt_node_max(x) mt_max[mte_node_type(x)]
+#endif
+
+static const unsigned char mt_slots[] = {
+ [maple_dense] = MAPLE_NODE_SLOTS,
+ [maple_leaf_64] = MAPLE_RANGE64_SLOTS,
+ [maple_range_64] = MAPLE_RANGE64_SLOTS,
+ [maple_arange_64] = MAPLE_ARANGE64_SLOTS,
+};
+#define mt_slot_count(x) mt_slots[mte_node_type(x)]
+
+static const unsigned char mt_pivots[] = {
+ [maple_dense] = 0,
+ [maple_leaf_64] = MAPLE_RANGE64_SLOTS - 1,
+ [maple_range_64] = MAPLE_RANGE64_SLOTS - 1,
+ [maple_arange_64] = MAPLE_ARANGE64_SLOTS - 1,
+};
+#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
+
+static const unsigned char mt_min_slots[] = {
+ [maple_dense] = MAPLE_NODE_SLOTS / 2,
+ [maple_leaf_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
+ [maple_range_64] = (MAPLE_RANGE64_SLOTS / 2) - 2,
+ [maple_arange_64] = (MAPLE_ARANGE64_SLOTS / 2) - 1,
+};
+#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
+
+#define MAPLE_BIG_NODE_SLOTS (MAPLE_RANGE64_SLOTS * 2 + 2)
+#define MAPLE_BIG_NODE_GAPS (MAPLE_ARANGE64_SLOTS * 2 + 1)
+
+struct maple_big_node {
+ struct maple_pnode *parent;
+ unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
+ union {
+ struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
+ struct {
+ unsigned long padding[MAPLE_BIG_NODE_GAPS];
+ unsigned long gap[MAPLE_BIG_NODE_GAPS];
+ };
+ };
+ unsigned char b_end;
+ enum maple_type type;
+};
+
+/*
+ * The maple_subtree_state is used to build a tree to replace a segment of an
+ * existing tree in a more atomic way. Any walkers of the older tree will hit a
+ * dead node and restart on updates.
+ */
+struct maple_subtree_state {
+ struct ma_state *orig_l; /* Original left side of subtree */
+ struct ma_state *orig_r; /* Original right side of subtree */
+ struct ma_state *l; /* New left side of subtree */
+ struct ma_state *m; /* New middle of subtree (rare) */
+ struct ma_state *r; /* New right side of subtree */
+ struct ma_topiary *free; /* nodes to be freed */
+ struct ma_topiary *destroy; /* Nodes to be destroyed (walked and freed) */
+ struct maple_big_node *bn;
+};
+
+/* Functions */
+static inline struct maple_node *mt_alloc_one(gfp_t gfp)
+{
+ return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO);
+}
+
+static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
+{
+ return kmem_cache_alloc_bulk(maple_node_cache, gfp | __GFP_ZERO, size,
+ nodes);
+}
+
+static inline void mt_free_bulk(size_t size, void __rcu **nodes)
+{
+ kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
+}
+
+static void mt_free_rcu(struct rcu_head *head)
+{
+ struct maple_node *node = container_of(head, struct maple_node, rcu);
+
+ kmem_cache_free(maple_node_cache, node);
+}
+
+/*
+ * ma_free_rcu() - Use rcu callback to free a maple node
+ * @node: The node to free
+ *
+ * The maple tree uses the parent pointer to indicate this node is no longer in
+ * use and will be freed.
+ */
+static void ma_free_rcu(struct maple_node *node)
+{
+ node->parent = ma_parent_ptr(node);
+ call_rcu(&node->rcu, mt_free_rcu);
+}
+
+static unsigned int mt_height(const struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET;
+}
+
+static void mas_set_height(struct ma_state *mas)
+{
+ unsigned int new_flags = mas->tree->ma_flags;
+
+ new_flags &= ~MT_FLAGS_HEIGHT_MASK;
+ BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
+ new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
+ mas->tree->ma_flags = new_flags;
+}
+
+static unsigned int mas_mt_height(struct ma_state *mas)
+{
+ return mt_height(mas->tree);
+}
+
+static inline enum maple_type mte_node_type(const struct maple_enode *entry)
+{
+ return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
+ MAPLE_NODE_TYPE_MASK;
+}
+
+static inline bool ma_is_dense(const enum maple_type type)
+{
+ return type < maple_leaf_64;
+}
+
+static inline bool ma_is_leaf(const enum maple_type type)
+{
+ return type < maple_range_64;
+}
+
+static inline bool mte_is_leaf(const struct maple_enode *entry)
+{
+ return ma_is_leaf(mte_node_type(entry));
+}
+
+/*
+ * We also reserve values with the bottom two bits set to '10' which are
+ * below 4096
+ */
+static inline bool mt_is_reserved(const void *entry)
+{
+ return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
+ xa_is_internal(entry);
+}
+
+static inline void mas_set_err(struct ma_state *mas, long err)
+{
+ mas->node = MA_ERROR(err);
+}
+
+static inline bool mas_is_ptr(struct ma_state *mas)
+{
+ return mas->node == MAS_ROOT;
+}
+
+static inline bool mas_is_start(struct ma_state *mas)
+{
+ return mas->node == MAS_START;
+}
+
+bool mas_is_err(struct ma_state *mas)
+{
+ return xa_is_err(mas->node);
+}
+
+static inline bool mas_searchable(struct ma_state *mas)
+{
+ if (mas_is_none(mas))
+ return false;
+
+ if (mas_is_ptr(mas))
+ return false;
+
+ return true;
+}
+
+static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
+{
+ return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
+}
+
+/*
+ * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
+ * @entry: The maple encoded node
+ *
+ * Return: a maple topiary pointer
+ */
+static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
+{
+ return (struct maple_topiary *)
+ ((unsigned long)entry & ~MAPLE_NODE_MASK);
+}
+
+/*
+ * mas_mn() - Get the maple state node.
+ * @mas: The maple state
+ *
+ * Return: the maple node (not encoded - bare pointer).
+ */
+static inline struct maple_node *mas_mn(const struct ma_state *mas)
+{
+ return mte_to_node(mas->node);
+}
+
+/*
+ * mte_set_node_dead() - Set a maple encoded node as dead.
+ * @mn: The maple encoded node.
+ */
+static inline void mte_set_node_dead(struct maple_enode *mn)
+{
+ mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
+ smp_wmb(); /* Needed for RCU */
+}
+
+/* Bit 1 indicates the root is a node */
+#define MAPLE_ROOT_NODE 0x02
+/* maple_type stored bit 3-6 */
+#define MAPLE_ENODE_TYPE_SHIFT 0x03
+/* Bit 2 means a NULL somewhere below */
+#define MAPLE_ENODE_NULL 0x04
+
+static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
+ enum maple_type type)
+{
+ return (void *)((unsigned long)node |
+ (type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
+}
+
+static inline void *mte_mk_root(const struct maple_enode *node)
+{
+ return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
+}
+
+static inline void *mte_safe_root(const struct maple_enode *node)
+{
+ return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
+}
+
+static inline void mte_set_full(const struct maple_enode *node)
+{
+ node = (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
+}
+
+static inline void mte_clear_full(const struct maple_enode *node)
+{
+ node = (void *)((unsigned long)node | MAPLE_ENODE_NULL);
+}
+
+static inline bool ma_is_root(struct maple_node *node)
+{
+ return ((unsigned long)node->parent & MA_ROOT_PARENT);
+}
+
+static inline bool mte_is_root(const struct maple_enode *node)
+{
+ return ma_is_root(mte_to_node(node));
+}
+
+static inline bool mas_is_root_limits(const struct ma_state *mas)
+{
+ return !mas->min && mas->max == ULONG_MAX;
+}
+
+static inline bool mt_is_alloc(struct maple_tree *mt)
+{
+ return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
+}
+
+/*
+ * The Parent Pointer
+ * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
+ * When storing 32 or 64 bit values, the offset can fit into 5 bits. The 16
+ * bit values need an extra bit to store the offset. This extra bit comes from
+ * a reuse of the last bit in the node type. This is possible by using bit 1 to
+ * indicate if bit 2 is part of the type or the slot.
+ *
+ * Node types:
+ * 0x??1 = Root
+ * 0x?00 = 16 bit nodes
+ * 0x010 = 32 bit nodes
+ * 0x110 = 64 bit nodes
+ *
+ * Slot size and alignment
+ * 0b??1 : Root
+ * 0b?00 : 16 bit values, type in 0-1, slot in 2-7
+ * 0b010 : 32 bit values, type in 0-2, slot in 3-7
+ * 0b110 : 64 bit values, type in 0-2, slot in 3-7
+ */
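+
+/*
+ * Illustrative example (editorial addition, not from the original patch):
+ * for a child stored in slot 3 of a maple_range_64 parent at 256B-aligned
+ * address P, mte_set_parent() below would encode roughly:
+ *
+ *   P | (3 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64
+ *     = P | 0x18 | 0x06 = P | 0x1e
+ *
+ * mte_parent() clears the MAPLE_NODE_MASK bits to recover P, and
+ * mte_parent_slot() shifts the low bits back down to recover slot 3.
+ */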
+
+#define MAPLE_PARENT_ROOT 0x01
+
+#define MAPLE_PARENT_SLOT_SHIFT 0x03
+#define MAPLE_PARENT_SLOT_MASK 0xF8
+
+#define MAPLE_PARENT_16B_SLOT_SHIFT 0x02
+#define MAPLE_PARENT_16B_SLOT_MASK 0xFC
+
+#define MAPLE_PARENT_RANGE64 0x06
+#define MAPLE_PARENT_RANGE32 0x04
+#define MAPLE_PARENT_NOT_RANGE16 0x02
+
+/*
+ * mte_parent_shift() - Get the parent shift for the slot storage.
+ * @parent: The parent pointer cast as an unsigned long
+ * Return: The shift into that pointer to the start of the slot
+ */
+static inline unsigned long mte_parent_shift(unsigned long parent)
+{
+ /* Note bit 1 == 0 means 16B */
+ if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
+ return MAPLE_PARENT_SLOT_SHIFT;
+
+ return MAPLE_PARENT_16B_SLOT_SHIFT;
+}
+
+/*
+ * mte_parent_slot_mask() - Get the slot mask for the parent.
+ * @parent: The parent pointer cast as an unsigned long.
+ * Return: The slot mask for that parent.
+ */
+static inline unsigned long mte_parent_slot_mask(unsigned long parent)
+{
+ /* Note bit 1 == 0 means 16B */
+ if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
+ return MAPLE_PARENT_SLOT_MASK;
+
+ return MAPLE_PARENT_16B_SLOT_MASK;
+}
+
+/*
+ * mte_parent_enum() - Return the maple_type of the parent from the stored
+ * parent type.
+ * @p_enode: The parent pointer cast as a maple_enode
+ * @mt: The maple tree
+ * Return: The node->parent maple_type
+ */
+static inline
+enum maple_type mte_parent_enum(struct maple_enode *p_enode,
+ struct maple_tree *mt)
+{
+ unsigned long p_type;
+
+ p_type = (unsigned long)p_enode;
+ if (p_type & MAPLE_PARENT_ROOT)
+ return 0; /* Validated in the caller. */
+
+ p_type &= MAPLE_NODE_MASK;
+ p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));
+
+ switch (p_type) {
+ case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
+ if (mt_is_alloc(mt))
+ return maple_arange_64;
+ return maple_range_64;
+ }
+
+ return 0;
+}
+
+static inline
+enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
+{
+ return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
+}
+
+/*
+ * mte_set_parent() - Set the parent node and encode the slot
+ * @enode: The encoded maple node.
+ * @parent: The encoded maple node that is the parent of @enode.
+ * @slot: The slot that @enode resides in @parent.
+ *
+ * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
+ * parent type.
+ */
+static inline
+void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
+ unsigned char slot)
+{
+ unsigned long val = (unsigned long) parent;
+ unsigned long shift;
+ unsigned long type;
+ enum maple_type p_type = mte_node_type(parent);
+
+ BUG_ON(p_type == maple_dense);
+ BUG_ON(p_type == maple_leaf_64);
+
+ switch (p_type) {
+ case maple_range_64:
+ case maple_arange_64:
+ shift = MAPLE_PARENT_SLOT_SHIFT;
+ type = MAPLE_PARENT_RANGE64;
+ break;
+ default:
+ case maple_dense:
+ case maple_leaf_64:
+ shift = type = 0;
+ break;
+ }
+
+ val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
+ val |= (slot << shift) | type;
+ mte_to_node(enode)->parent = ma_parent_ptr(val);
+}
+
+/*
+ * mte_parent_slot() - get the parent slot of @enode.
+ * @enode: The encoded maple node.
+ *
+ * Return: The slot in the parent node where @enode resides.
+ */
+static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
+{
+ unsigned long val = (unsigned long) mte_to_node(enode)->parent;
+
+ /* Root. */
+ if (val & 1)
+ return 0;
+
+ /*
+ * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
+ * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
+ */
+ return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
+}
+
+/*
+ * mte_parent() - Get the parent of @enode.
+ * @enode: The encoded maple node.
+ *
+ * Return: The parent maple node.
+ */
+static inline struct maple_node *mte_parent(const struct maple_enode *enode)
+{
+ return (void *)((unsigned long)
+ (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
+}
+
+/*
+ * ma_dead_node() - check if the @node is dead.
+ * @node: The maple node
+ *
+ * Return: true if dead, false otherwise.
+ */
+static inline bool ma_dead_node(const struct maple_node *node)
+{
+ struct maple_node *parent = (void *)((unsigned long)
+ node->parent & ~MAPLE_NODE_MASK);
+
+ return (parent == node);
+}
+/*
+ * mte_dead_node() - check if the @enode is dead.
+ * @enode: The encoded maple node
+ *
+ * Return: true if dead, false otherwise.
+ */
+static inline bool mte_dead_node(const struct maple_enode *enode)
+{
+ struct maple_node *parent, *node;
+
+ node = mte_to_node(enode);
+ parent = mte_parent(enode);
+ return (parent == node);
+}
+
+/*
+ * mas_allocated() - Get the number of nodes allocated in a maple state.
+ * @mas: The maple state
+ *
+ * The ma_state alloc member is overloaded to hold a pointer to the first
+ * allocated node or to the number of requested nodes to allocate. If bit 0 is
+ * set, then the alloc contains the number of requested nodes. If there is an
+ * allocated node, then the total allocated nodes is in that node.
+ *
+ * Return: The total number of nodes allocated
+ */
+static inline unsigned long mas_allocated(const struct ma_state *mas)
+{
+ if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
+ return 0;
+
+ return mas->alloc->total;
+}
+
+/*
+ * mas_set_alloc_req() - Set the requested number of allocations.
+ * @mas: the maple state
+ * @count: the number of allocations.
+ *
+ * The requested number of allocations is either in the first allocated node,
+ * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
+ * no allocated node. Set the request either in the node or do the necessary
+ * encoding to store in @mas->alloc directly.
+ */
+static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
+{
+ if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
+ if (!count)
+ mas->alloc = NULL;
+ else
+ mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
+ return;
+ }
+
+ mas->alloc->request_count = count;
+}
+
+/*
+ * mas_alloc_req() - get the requested number of allocations.
+ * @mas: The maple state
+ *
+ * The alloc count is either stored directly in @mas, or in
+ * @mas->alloc->request_count if there is at least one node allocated. Decode
+ * the request count if it's stored directly in @mas->alloc.
+ *
+ * Return: The allocation request count.
+ */
+static inline unsigned int mas_alloc_req(const struct ma_state *mas)
+{
+ if ((unsigned long)mas->alloc & 0x1)
+ return (unsigned long)(mas->alloc) >> 1;
+ else if (mas->alloc)
+ return mas->alloc->request_count;
+ return 0;
+}
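+
+/*
+ * Illustrative example (editorial addition, not from the original patch):
+ * with no nodes allocated, a request for 5 nodes is encoded directly in
+ * mas->alloc as ((5 << 1) | 1) == 0xb; bit 0 being set is what
+ * distinguishes a request count from a pointer to the first (256B aligned)
+ * allocated node. Once a node has been allocated, the request count moves
+ * into mas->alloc->request_count and the running total into
+ * mas->alloc->total.
+ */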
+
+/*
+ * ma_pivots() - Get a pointer to the maple node pivots.
+ * @node - the maple node
+ * @type - the node type
+ *
+ * Return: A pointer to the maple node pivots
+ */
+static inline unsigned long *ma_pivots(struct maple_node *node,
+ enum maple_type type)
+{
+ switch (type) {
+ case maple_arange_64:
+ return node->ma64.pivot;
+ case maple_range_64:
+ case maple_leaf_64:
+ return node->mr64.pivot;
+ case maple_dense:
+ return NULL;
+ }
+ return NULL;
+}
+
+/*
+ * ma_gaps() - Get a pointer to the maple node gaps.
+ * @node - the maple node
+ * @type - the node type
+ *
+ * Return: A pointer to the maple node gaps
+ */
+static inline unsigned long *ma_gaps(struct maple_node *node,
+ enum maple_type type)
+{
+ switch (type) {
+ case maple_arange_64:
+ return node->ma64.gap;
+ case maple_range_64:
+ case maple_leaf_64:
+ case maple_dense:
+ return NULL;
+ }
+ return NULL;
+}
+
+/*
+ * mte_pivot() - Get the pivot at @piv of the maple encoded node.
+ * @mn: The maple encoded node.
+ * @piv: The pivot.
+ *
+ * Return: the pivot at @piv of @mn.
+ */
+static inline unsigned long mte_pivot(const struct maple_enode *mn,
+ unsigned char piv)
+{
+ struct maple_node *node = mte_to_node(mn);
+
+ if (piv >= mt_pivots[mte_node_type(mn)]) {
+ WARN_ON(1);
+ return 0;
+ }
+ switch (mte_node_type(mn)) {
+ case maple_arange_64:
+ return node->ma64.pivot[piv];
+ case maple_range_64:
+ case maple_leaf_64:
+ return node->mr64.pivot[piv];
+ case maple_dense:
+ return 0;
+ }
+ return 0;
+}
+
+/*
+ * mas_safe_pivot() - get the pivot at @piv or mas->max.
+ * @mas: The maple state
+ * @pivots: The pointer to the maple node pivots
+ * @piv: The pivot to fetch
+ * @type: The maple node type
+ *
+ * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
+ * otherwise.
+ */
+static inline unsigned long
+mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
+ unsigned char piv, enum maple_type type)
+{
+ if (piv >= mt_pivots[type])
+ return mas->max;
+
+ return pivots[piv];
+}
+
+/*
+ * mas_safe_min() - Return the minimum for a given offset.
+ * @mas: The maple state
+ * @pivots: The pointer to the maple node pivots
+ * @offset: The offset into the pivot array
+ *
+ * Return: The minimum range value that is contained in @offset.
+ */
+static inline unsigned long
+mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
+{
+ if (likely(offset))
+ return pivots[offset - 1] + 1;
+
+ return mas->min;
+}
+
+/*
+ * mas_logical_pivot() - Get the logical pivot of a given offset.
+ * @mas: The maple state
+ * @pivots: The pointer to the maple node pivots
+ * @offset: The offset into the pivot array
+ * @type: The maple node type
+ *
+ * When there is no value at a pivot (beyond the end of the data), then the
+ * pivot is actually @mas->max.
+ *
+ * Return: the logical pivot of a given @offset.
+ */
+static inline unsigned long
+mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
+ unsigned char offset, enum maple_type type)
+{
+ unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);
+
+ if (likely(lpiv))
+ return lpiv;
+
+ if (likely(offset))
+ return mas->max;
+
+ return lpiv;
+}
+
+/*
+ * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
+ * @mn: The encoded maple node
+ * @piv: The pivot offset
+ * @val: The value of the pivot
+ */
+static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
+ unsigned long val)
+{
+ struct maple_node *node = mte_to_node(mn);
+ enum maple_type type = mte_node_type(mn);
+
+ BUG_ON(piv >= mt_pivots[type]);
+ switch (type) {
+ default:
+ case maple_range_64:
+ case maple_leaf_64:
+ node->mr64.pivot[piv] = val;
+ break;
+ case maple_arange_64:
+ node->ma64.pivot[piv] = val;
+ break;
+ case maple_dense:
+ break;
+ }
+
+}
+
+/*
+ * ma_slots() - Get a pointer to the maple node slots.
+ * @mn: The maple node
+ * @mt: The maple node type
+ *
+ * Return: A pointer to the maple node slots
+ */
+static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
+{
+ switch (mt) {
+ default:
+ case maple_arange_64:
+ return mn->ma64.slot;
+ case maple_range_64:
+ case maple_leaf_64:
+ return mn->mr64.slot;
+ case maple_dense:
+ return mn->slot;
+ }
+}
+
+static inline bool mt_locked(const struct maple_tree *mt)
+{
+ return mt_external_lock(mt) ? mt_lock_is_held(mt) :
+ lockdep_is_held(&mt->ma_lock);
+}
+
+static inline void *mt_slot(const struct maple_tree *mt,
+ void __rcu **slots, unsigned char offset)
+{
+ return rcu_dereference_check(slots[offset], mt_locked(mt));
+}
+
+/*
+ * mas_slot_locked() - Get the slot value when holding the maple tree lock.
+ * @mas: The maple state
+ * @slots: The pointer to the slots
+ * @offset: The offset into the slots array to fetch
+ *
+ * Return: The entry stored in @slots at the @offset.
+ */
+static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
+ unsigned char offset)
+{
+ return rcu_dereference_protected(slots[offset], mt_locked(mas->tree));
+}
+
+/*
+ * mas_slot() - Get the slot value when not holding the maple tree lock.
+ * @mas: The maple state
+ * @slots: The pointer to the slots
+ * @offset: The offset into the slots array to fetch
+ *
+ * Return: The entry stored in @slots at the @offset
+ */
+static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
+ unsigned char offset)
+{
+ return mt_slot(mas->tree, slots, offset);
+}
+
+/*
+ * mas_root() - Get the maple tree root.
+ * @mas: The maple state.
+ *
+ * Return: The pointer to the root of the tree
+ */
+static inline void *mas_root(struct ma_state *mas)
+{
+ return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
+}
+
+static inline void *mt_root_locked(struct maple_tree *mt)
+{
+ return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
+}
+
+/*
+ * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
+ * @mas: The maple state.
+ *
+ * Return: The pointer to the root of the tree
+ */
+static inline void *mas_root_locked(struct ma_state *mas)
+{
+ return mt_root_locked(mas->tree);
+}
+
+static inline struct maple_metadata *ma_meta(struct maple_node *mn,
+ enum maple_type mt)
+{
+ switch (mt) {
+ case maple_arange_64:
+ return &mn->ma64.meta;
+ default:
+ return &mn->mr64.meta;
+ }
+}
+
+/*
+ * ma_set_meta() - Set the metadata information of a node.
+ * @mn: The maple node
+ * @mt: The maple node type
+ * @offset: The offset of the highest sub-gap in this node.
+ * @end: The end of the data in this node.
+ */
+static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
+ unsigned char offset, unsigned char end)
+{
+ struct maple_metadata *meta = ma_meta(mn, mt);
+
+ meta->gap = offset;
+ meta->end = end;
+}
+
+/*
+ * ma_meta_end() - Get the data end of a node from the metadata
+ * @mn: The maple node
+ * @mt: The maple node type
+ */
+static inline unsigned char ma_meta_end(struct maple_node *mn,
+ enum maple_type mt)
+{
+ struct maple_metadata *meta = ma_meta(mn, mt);
+
+ return meta->end;
+}
+
+/*
+ * ma_meta_gap() - Get the largest gap location of a node from the metadata
+ * @mn: The maple node
+ * @mt: The maple node type
+ */
+static inline unsigned char ma_meta_gap(struct maple_node *mn,
+ enum maple_type mt)
+{
+ BUG_ON(mt != maple_arange_64);
+
+ return mn->ma64.meta.gap;
+}
+
+/*
+ * ma_set_meta_gap() - Set the largest gap location in a node's metadata
+ * @mn: The maple node
+ * @mt: The maple node type
+ * @offset: The location of the largest gap.
+ */
+static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
+ unsigned char offset)
+{
+ struct maple_metadata *meta = ma_meta(mn, mt);
+
+ meta->gap = offset;
+}
+
+/*
+ * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
+ * @mat - the ma_topiary, a linked list of dead nodes.
+ * @dead_enode - the node to be marked as dead and added to the tail of the list
+ *
+ * Add the @dead_enode to the linked list in @mat.
+ */
+static inline void mat_add(struct ma_topiary *mat,
+ struct maple_enode *dead_enode)
+{
+ mte_set_node_dead(dead_enode);
+ mte_to_mat(dead_enode)->next = NULL;
+ if (!mat->tail) {
+ mat->tail = mat->head = dead_enode;
+ return;
+ }
+
+ mte_to_mat(mat->tail)->next = dead_enode;
+ mat->tail = dead_enode;
+}
+
+static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
+static inline void mas_free(struct ma_state *mas, struct maple_enode *used);
+
+/*
+ * mas_mat_free() - Free all nodes in a dead list.
+ * @mas - the maple state
+ * @mat - the ma_topiary linked list of dead nodes to free.
+ *
+ * Free walk a dead list.
+ */
+static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
+{
+ struct maple_enode *next;
+
+ while (mat->head) {
+ next = mte_to_mat(mat->head)->next;
+ mas_free(mas, mat->head);
+ mat->head = next;
+ }
+}
+
+/*
+ * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
+ * @mas - the maple state
+ * @mat - the ma_topiary linked list of dead nodes to free.
+ *
+ * Destroy walk a dead list.
+ */
+static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
+{
+ struct maple_enode *next;
+
+ while (mat->head) {
+ next = mte_to_mat(mat->head)->next;
+ mte_destroy_walk(mat->head, mat->mtree);
+ mat->head = next;
+ }
+}
+/*
+ * mas_descend() - Descend into the slot stored in the ma_state.
+ * @mas - the maple state.
+ *
+ * Note: Not RCU safe, only use in write side or debug code.
+ */
+static inline void mas_descend(struct ma_state *mas)
+{
+ enum maple_type type;
+ unsigned long *pivots;
+ struct maple_node *node;
+ void __rcu **slots;
+
+ node = mas_mn(mas);
+ type = mte_node_type(mas->node);
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+
+ if (mas->offset)
+ mas->min = pivots[mas->offset - 1] + 1;
+ mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
+ mas->node = mas_slot(mas, slots, mas->offset);
+}
+
+/*
+ * mte_set_gap() - Set a maple node gap.
+ * @mn: The encoded maple node
+ * @gap: The offset of the gap to set
+ * @val: The gap value
+ */
+static inline void mte_set_gap(const struct maple_enode *mn,
+ unsigned char gap, unsigned long val)
+{
+ switch (mte_node_type(mn)) {
+ default:
+ break;
+ case maple_arange_64:
+ mte_to_node(mn)->ma64.gap[gap] = val;
+ break;
+ }
+}
+
+/*
+ * mas_ascend() - Walk up a level of the tree.
+ * @mas: The maple state
+ *
+ * Sets the @mas->max and @mas->min to the correct values when walking up. This
+ * may cause several levels of walking up to find the correct min and max.
+ * May find a dead node which will cause a premature return.
+ * Return: 1 on dead node, 0 otherwise
+ */
+static int mas_ascend(struct ma_state *mas)
+{
+ struct maple_enode *p_enode; /* parent enode. */
+ struct maple_enode *a_enode; /* ancestor enode. */
+ struct maple_node *a_node; /* ancestor node. */
+ struct maple_node *p_node; /* parent node. */
+ unsigned char a_slot;
+ enum maple_type a_type;
+ unsigned long min, max;
+ unsigned long *pivots;
+ unsigned char offset;
+ bool set_max = false, set_min = false;
+
+ a_node = mas_mn(mas);
+ if (ma_is_root(a_node)) {
+ mas->offset = 0;
+ return 0;
+ }
+
+ p_node = mte_parent(mas->node);
+ if (unlikely(a_node == p_node))
+ return 1;
+ a_type = mas_parent_enum(mas, mas->node);
+ offset = mte_parent_slot(mas->node);
+ a_enode = mt_mk_node(p_node, a_type);
+
+ /* Check to make sure all parent information is still accurate */
+ if (p_node != mte_parent(mas->node))
+ return 1;
+
+ mas->node = a_enode;
+ mas->offset = offset;
+
+ if (mte_is_root(a_enode)) {
+ mas->max = ULONG_MAX;
+ mas->min = 0;
+ return 0;
+ }
+
+ min = 0;
+ max = ULONG_MAX;
+ do {
+ p_enode = a_enode;
+ a_type = mas_parent_enum(mas, p_enode);
+ a_node = mte_parent(p_enode);
+ a_slot = mte_parent_slot(p_enode);
+ pivots = ma_pivots(a_node, a_type);
+ a_enode = mt_mk_node(a_node, a_type);
+
+ if (!set_min && a_slot) {
+ set_min = true;
+ min = pivots[a_slot - 1] + 1;
+ }
+
+ if (!set_max && a_slot < mt_pivots[a_type]) {
+ set_max = true;
+ max = pivots[a_slot];
+ }
+
+ if (unlikely(ma_dead_node(a_node)))
+ return 1;
+
+ if (unlikely(ma_is_root(a_node)))
+ break;
+
+ } while (!set_min || !set_max);
+
+ mas->max = max;
+ mas->min = min;
+ return 0;
+}
+
+/*
+ * mas_pop_node() - Get a previously allocated maple node from the maple state.
+ * @mas: The maple state
+ *
+ * Return: A pointer to a maple node.
+ */
+static inline struct maple_node *mas_pop_node(struct ma_state *mas)
+{
+ struct maple_alloc *ret, *node = mas->alloc;
+ unsigned long total = mas_allocated(mas);
+
+ /* nothing or a request pending. */
+ if (unlikely(!total))
+ return NULL;
+
+ if (total == 1) {
+ /* single allocation in this ma_state */
+ mas->alloc = NULL;
+ ret = node;
+ goto single_node;
+ }
+
+ if (!node->node_count) {
+ /* Single allocation in this node. */
+ mas->alloc = node->slot[0];
+ node->slot[0] = NULL;
+ mas->alloc->total = node->total - 1;
+ ret = node;
+ goto new_head;
+ }
+
+ node->total--;
+ ret = node->slot[node->node_count];
+ node->slot[node->node_count--] = NULL;
+
+single_node:
+new_head:
+ ret->total = 0;
+ ret->node_count = 0;
+ if (ret->request_count) {
+ mas_set_alloc_req(mas, ret->request_count + 1);
+ ret->request_count = 0;
+ }
+ return (struct maple_node *)ret;
+}
+
+/*
+ * mas_push_node() - Push a node back on the maple state allocation.
+ * @mas: The maple state
+ * @used: The used maple node
+ *
+ * Stores the maple node back into @mas->alloc for reuse. Updates allocated and
+ * requested node count as necessary.
+ */
+static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
+{
+ struct maple_alloc *reuse = (struct maple_alloc *)used;
+ struct maple_alloc *head = mas->alloc;
+ unsigned long count;
+ unsigned int requested = mas_alloc_req(mas);
+
+ memset(reuse, 0, sizeof(*reuse));
+ count = mas_allocated(mas);
+
+ if (count && (head->node_count < MAPLE_ALLOC_SLOTS - 1)) {
+ if (head->slot[0])
+ head->node_count++;
+ head->slot[head->node_count] = reuse;
+ head->total++;
+ goto done;
+ }
+
+ reuse->total = 1;
+ if ((head) && !((unsigned long)head & 0x1)) {
+ head->request_count = 0;
+ reuse->slot[0] = head;
+ reuse->total += head->total;
+ }
+
+ mas->alloc = reuse;
+done:
+ if (requested > 1)
+ mas_set_alloc_req(mas, requested - 1);
+}
+
+/*
+ * mas_alloc_nodes() - Allocate nodes into a maple state
+ * @mas: The maple state
+ * @gfp: The GFP Flags
+ */
+static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
+{
+ struct maple_alloc *node;
+ struct maple_alloc **nodep = &mas->alloc;
+ unsigned long allocated = mas_allocated(mas);
+ unsigned long success = allocated;
+ unsigned int requested = mas_alloc_req(mas);
+ unsigned int count;
+ void **slots = NULL;
+ unsigned int max_req = 0;
+
+ if (!requested)
+ return;
+
+ mas_set_alloc_req(mas, 0);
+ if (mas->mas_flags & MA_STATE_PREALLOC) {
+ if (allocated)
+ return;
+ WARN_ON(!allocated);
+ }
+
+ if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS - 1) {
+ node = (struct maple_alloc *)mt_alloc_one(gfp);
+ if (!node)
+ goto nomem_one;
+
+ if (allocated)
+ node->slot[0] = mas->alloc;
+
+ success++;
+ mas->alloc = node;
+ requested--;
+ }
+
+ node = mas->alloc;
+ while (requested) {
+ max_req = MAPLE_ALLOC_SLOTS;
+ if (node->slot[0]) {
+ unsigned int offset = node->node_count + 1;
+
+ slots = (void **)&node->slot[offset];
+ max_req -= offset;
+ } else {
+ slots = (void **)&node->slot;
+ }
+
+ max_req = min(requested, max_req);
+ count = mt_alloc_bulk(gfp, max_req, slots);
+ if (!count)
+ goto nomem_bulk;
+
+ node->node_count += count;
+ /* zero indexed. */
+ if (slots == (void **)&node->slot)
+ node->node_count--;
+
+ success += count;
+ nodep = &node->slot[0];
+ node = *nodep;
+ requested -= count;
+ }
+ mas->alloc->total = success;
+ return;
+
+nomem_bulk:
+ /* Clean up potential freed allocations on bulk failure */
+ memset(slots, 0, max_req * sizeof(unsigned long));
+nomem_one:
+ mas_set_alloc_req(mas, requested);
+ if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
+ mas->alloc->total = success;
+ mas_set_err(mas, -ENOMEM);
+ return;
+
+}
+
+/*
+ * mas_free() - Free an encoded maple node
+ * @mas: The maple state
+ * @used: The encoded maple node to free.
+ *
+ * Uses rcu free if necessary, pushes @used back on the maple state allocations
+ * otherwise.
+ */
+static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
+{
+ struct maple_node *tmp = mte_to_node(used);
+
+ if (mt_in_rcu(mas->tree))
+ ma_free_rcu(tmp);
+ else
+ mas_push_node(mas, tmp);
+}
+
+/*
+ * mas_node_count_gfp() - Check if enough nodes are allocated and request more
+ * if there are not enough nodes.
+ * @mas: The maple state
+ * @count: The number of nodes needed
+ * @gfp: the gfp flags
+ */
+static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
+{
+ unsigned long allocated = mas_allocated(mas);
+
+ if (allocated < count) {
+ mas_set_alloc_req(mas, count - allocated);
+ mas_alloc_nodes(mas, gfp);
+ }
+}
+
+/*
+ * mas_node_count() - Check if enough nodes are allocated and request more if
+ * there are not enough nodes.
+ * @mas: The maple state
+ * @count: The number of nodes needed
+ *
+ * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
+ */
+static void mas_node_count(struct ma_state *mas, int count)
+{
+ return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
+}
+
+/*
+ * mas_start() - Sets up maple state for operations.
+ * @mas: The maple state.
+ *
+ * If mas->node == MAS_START, then set the min, max, depth, and offset to
+ * defaults.
+ *
+ * Return:
+ * - If mas->node is an error or not MAS_START, return NULL.
+ * - If it's an empty tree: NULL & mas->node == MAS_NONE
+ * - If it's a single entry: The entry & mas->node == MAS_ROOT
+ * - If it's a tree: NULL & mas->node == safe root node.
+ */
+static inline struct maple_enode *mas_start(struct ma_state *mas)
+{
+ if (likely(mas_is_start(mas))) {
+ struct maple_enode *root;
+
+ mas->node = MAS_NONE;
+ mas->min = 0;
+ mas->max = ULONG_MAX;
+ mas->depth = 0;
+ mas->offset = 0;
+
+ root = mas_root(mas);
+ /* Tree with nodes */
+ if (likely(xa_is_node(root))) {
+ mas->node = mte_safe_root(root);
+ return NULL;
+ }
+
+ /* empty tree */
+ if (unlikely(!root)) {
+ mas->offset = MAPLE_NODE_SLOTS;
+ return NULL;
+ }
+
+ /* Single entry tree */
+ mas->node = MAS_ROOT;
+ mas->offset = MAPLE_NODE_SLOTS;
+
+ /* A single entry is only valid for index 0. */
+ if (mas->index > 0)
+ return NULL;
+
+ return root;
+ }
+
+ return NULL;
+}
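+
+/*
+ * Illustrative sketch (editorial addition, not from the original patch) of
+ * how a caller might act on the mas_start() return states listed above:
+ *
+ *   entry = mas_start(mas);
+ *   if (mas_is_none(mas))      - empty tree, nothing to walk
+ *           return NULL;
+ *   if (mas_is_ptr(mas))       - single entry tree, entry valid at index 0
+ *           return entry;
+ *   descend from mas->node     - otherwise the walk starts at the safe root
+ */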
+
+/*
+ * ma_data_end() - Find the end of the data in a node.
+ * @node: The maple node
+ * @type: The maple node type
+ * @pivots: The array of pivots in the node
+ * @max: The maximum value in the node
+ *
+ * Uses metadata to find the end of the data when possible.
+ * Return: The zero indexed last slot with data (may be null).
+ */
+static inline unsigned char ma_data_end(struct maple_node *node,
+ enum maple_type type,
+ unsigned long *pivots,
+ unsigned long max)
+{
+ unsigned char offset;
+
+ if (type == maple_arange_64)
+ return ma_meta_end(node, type);
+
+ offset = mt_pivots[type] - 1;
+ if (likely(!pivots[offset]))
+ return ma_meta_end(node, type);
+
+ if (likely(pivots[offset] == max))
+ return offset;
+
+ return mt_pivots[type];
+}
+
+/*
+ * mas_data_end() - Find the end of the data (slot).
+ * @mas: the maple state
+ *
+ * This method is optimized to check the metadata of a node if the node type
+ * supports data end metadata.
+ *
+ * Return: The zero indexed last slot with data (may be null).
+ */
+static inline unsigned char mas_data_end(struct ma_state *mas)
+{
+ enum maple_type type;
+ struct maple_node *node;
+ unsigned char offset;
+ unsigned long *pivots;
+
+ type = mte_node_type(mas->node);
+ node = mas_mn(mas);
+ if (type == maple_arange_64)
+ return ma_meta_end(node, type);
+
+ pivots = ma_pivots(node, type);
+ offset = mt_pivots[type] - 1;
+ if (likely(!pivots[offset]))
+ return ma_meta_end(node, type);
+
+ if (likely(pivots[offset] == mas->max))
+ return offset;
+
+ return mt_pivots[type];
+}
+
+/*
+ * mas_leaf_max_gap() - Returns the largest gap in a leaf node
+ * @mas - the maple state
+ *
+ * Return: The maximum gap in the leaf.
+ */
+static unsigned long mas_leaf_max_gap(struct ma_state *mas)
+{
+ enum maple_type mt;
+ unsigned long pstart, gap, max_gap;
+ struct maple_node *mn;
+ unsigned long *pivots;
+ void __rcu **slots;
+ unsigned char i;
+ unsigned char max_piv;
+
+ mt = mte_node_type(mas->node);
+ mn = mas_mn(mas);
+ slots = ma_slots(mn, mt);
+ max_gap = 0;
+ if (unlikely(ma_is_dense(mt))) {
+ gap = 0;
+ for (i = 0; i < mt_slots[mt]; i++) {
+ if (slots[i]) {
+ if (gap > max_gap)
+ max_gap = gap;
+ gap = 0;
+ } else {
+ gap++;
+ }
+ }
+ if (gap > max_gap)
+ max_gap = gap;
+ return max_gap;
+ }
+
+ /*
+ * Checking the first implied pivot optimizes the loop below, and slot 1 may
+ * be skipped if there is a gap in slot 0.
+ */
+ pivots = ma_pivots(mn, mt);
+ if (likely(!slots[0])) {
+ max_gap = pivots[0] - mas->min + 1;
+ i = 2;
+ } else {
+ i = 1;
+ }
+
+ /* reduce max_piv as the special case is checked before the loop */
+ max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
+ /*
+ * Check end implied pivot which can only be a gap on the right most
+ * node.
+ */
+ if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
+ gap = ULONG_MAX - pivots[max_piv];
+ if (gap > max_gap)
+ max_gap = gap;
+ }
+
+ for (; i <= max_piv; i++) {
+ /* data == no gap. */
+ if (likely(slots[i]))
+ continue;
+
+ pstart = pivots[i - 1];
+ gap = pivots[i] - pstart;
+ if (gap > max_gap)
+ max_gap = gap;
+
+ /* There cannot be two gaps in a row. */
+ i++;
+ }
+ return max_gap;
+}
+
+/*
+ * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
+ * @node: The maple node
+ * @gaps: The pointer to the gaps
+ * @mt: The maple node type
+ * @off: Pointer to store the offset location of the gap.
+ *
+ * Uses the metadata data end to scan backwards across set gaps.
+ *
+ * Return: The maximum gap value
+ */
+static inline unsigned long
+ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
+ unsigned char *off)
+{
+ unsigned char offset, i;
+ unsigned long max_gap = 0;
+
+ i = offset = ma_meta_end(node, mt);
+ do {
+ if (gaps[i] > max_gap) {
+ max_gap = gaps[i];
+ offset = i;
+ }
+ } while (i--);
+
+ *off = offset;
+ return max_gap;
+}
+
+/*
+ * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
+ * @mas: The maple state.
+ *
+ * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
+ *
+ * Return: The gap value.
+ */
+static inline unsigned long mas_max_gap(struct ma_state *mas)
+{
+ unsigned long *gaps;
+ unsigned char offset;
+ enum maple_type mt;
+ struct maple_node *node;
+
+ mt = mte_node_type(mas->node);
+ if (ma_is_leaf(mt))
+ return mas_leaf_max_gap(mas);
+
+ node = mas_mn(mas);
+ offset = ma_meta_gap(node, mt);
+ if (offset == MAPLE_ARANGE64_META_MAX)
+ return 0;
+
+ gaps = ma_gaps(node, mt);
+ return gaps[offset];
+}
+
+/*
+ * mas_parent_gap() - Set the parent gap and any gaps above, as needed
+ * @mas: The maple state
+ * @offset: The gap offset in the parent to set
+ * @new: The new gap value.
+ *
+ * Set the parent gap then continue to set the gap upwards, using the metadata
+ * of the parent to see if it is necessary to check the node above.
+ */
+static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
+ unsigned long new)
+{
+ unsigned long meta_gap = 0;
+ struct maple_node *pnode;
+ struct maple_enode *penode;
+ unsigned long *pgaps;
+ unsigned char meta_offset;
+ enum maple_type pmt;
+
+ pnode = mte_parent(mas->node);
+ pmt = mas_parent_enum(mas, mas->node);
+ penode = mt_mk_node(pnode, pmt);
+ pgaps = ma_gaps(pnode, pmt);
+
+ascend:
+ meta_offset = ma_meta_gap(pnode, pmt);
+ if (meta_offset == MAPLE_ARANGE64_META_MAX)
+ meta_gap = 0;
+ else
+ meta_gap = pgaps[meta_offset];
+
+ pgaps[offset] = new;
+
+ if (meta_gap == new)
+ return;
+
+ if (offset != meta_offset) {
+ if (meta_gap > new)
+ return;
+
+ ma_set_meta_gap(pnode, pmt, offset);
+ } else if (new < meta_gap) {
+ meta_offset = 15;
+ new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
+ ma_set_meta_gap(pnode, pmt, meta_offset);
+ }
+
+ if (ma_is_root(pnode))
+ return;
+
+ /* Go to the parent node. */
+ pnode = mte_parent(penode);
+ pmt = mas_parent_enum(mas, penode);
+ pgaps = ma_gaps(pnode, pmt);
+ offset = mte_parent_slot(penode);
+ penode = mt_mk_node(pnode, pmt);
+ goto ascend;
+}
+
+/*
+ * mas_update_gap() - Update a node's gaps and propagate up if necessary.
+ * @mas - the maple state.
+ */
+static inline void mas_update_gap(struct ma_state *mas)
+{
+ unsigned char pslot;
+ unsigned long p_gap;
+ unsigned long max_gap;
+
+ if (!mt_is_alloc(mas->tree))
+ return;
+
+ if (mte_is_root(mas->node))
+ return;
+
+ max_gap = mas_max_gap(mas);
+
+ pslot = mte_parent_slot(mas->node);
+ p_gap = ma_gaps(mte_parent(mas->node),
+ mas_parent_enum(mas, mas->node))[pslot];
+
+ if (p_gap != max_gap)
+ mas_parent_gap(mas, pslot, max_gap);
+}
+
+/*
+ * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
+ * @parent with the slot encoded.
+ * @mas - the maple state (for the tree)
+ * @parent - the maple encoded node containing the children.
+ */
+static inline void mas_adopt_children(struct ma_state *mas,
+ struct maple_enode *parent)
+{
+ enum maple_type type = mte_node_type(parent);
+ struct maple_node *node = mas_mn(mas);
+ void __rcu **slots = ma_slots(node, type);
+ unsigned long *pivots = ma_pivots(node, type);
+ struct maple_enode *child;
+ unsigned char offset;
+
+ offset = ma_data_end(node, type, pivots, mas->max);
+ do {
+ child = mas_slot_locked(mas, slots, offset);
+ mte_set_parent(child, parent, offset);
+ } while (offset--);
+}
+
+/*
+ * mas_replace() - Replace a maple node in the tree with mas->node. Uses the
+ * parent encoding to locate the maple node in the tree.
+ * @mas - the ma_state to use for operations.
+ * @advanced - boolean to adopt the child nodes and free the old node (false) or
+ * leave the node (true) and handle the adoption and free elsewhere.
+ */
+static inline void mas_replace(struct ma_state *mas, bool advanced)
+ __must_hold(mas->tree->lock)
+{
+ struct maple_node *mn = mas_mn(mas);
+ struct maple_enode *old_enode;
+ unsigned char offset = 0;
+ void __rcu **slots = NULL;
+
+ if (ma_is_root(mn)) {
+ old_enode = mas_root_locked(mas);
+ } else {
+ offset = mte_parent_slot(mas->node);
+ slots = ma_slots(mte_parent(mas->node),
+ mas_parent_enum(mas, mas->node));
+ old_enode = mas_slot_locked(mas, slots, offset);
+ }
+
+ if (!advanced && !mte_is_leaf(mas->node))
+ mas_adopt_children(mas, mas->node);
+
+ if (mte_is_root(mas->node)) {
+ mn->parent = ma_parent_ptr(
+ ((unsigned long)mas->tree | MA_ROOT_PARENT));
+ rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+ mas_set_height(mas);
+ } else {
+ rcu_assign_pointer(slots[offset], mas->node);
+ }
+
+ if (!advanced)
+ mas_free(mas, old_enode);
+}
+
+/*
+ * mas_new_child() - Find the new child of a node.
+ * @mas: the maple state
+ * @child: the maple state to store the child.
+ */
+static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
+ __must_hold(mas->tree->lock)
+{
+ enum maple_type mt;
+ unsigned char offset;
+ unsigned char end;
+ unsigned long *pivots;
+ struct maple_enode *entry;
+ struct maple_node *node;
+ void __rcu **slots;
+
+ mt = mte_node_type(mas->node);
+ node = mas_mn(mas);
+ slots = ma_slots(node, mt);
+ pivots = ma_pivots(node, mt);
+ end = ma_data_end(node, mt, pivots, mas->max);
+ for (offset = mas->offset; offset <= end; offset++) {
+ entry = mas_slot_locked(mas, slots, offset);
+ if (mte_parent(entry) == node) {
+ *child = *mas;
+ mas->offset = offset + 1;
+ child->offset = offset;
+ mas_descend(child);
+ child->offset = 0;
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * mab_shift_right() - Shift the data in mab right. Note, does not clean out the
+ * old data or set b_node->b_end.
+ * @b_node: the maple_big_node
+ * @shift: the shift count
+ */
+static inline void mab_shift_right(struct maple_big_node *b_node,
+ unsigned char shift)
+{
+ unsigned long size = b_node->b_end * sizeof(unsigned long);
+
+ memmove(b_node->pivot + shift, b_node->pivot, size);
+ memmove(b_node->slot + shift, b_node->slot, size);
+ if (b_node->type == maple_arange_64)
+ memmove(b_node->gap + shift, b_node->gap, size);
+}
+
+/*
+ * mab_middle_node() - Check if a middle node is needed (unlikely)
+ * @b_node: the maple_big_node that contains the data.
+ * @size: the amount of data in the b_node
+ * @split: the potential split location
+ * @slot_count: the size that can be stored in a single node being considered.
+ *
+ * Return: true if a middle node is required.
+ */
+static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
+ unsigned char slot_count)
+{
+ unsigned char size = b_node->b_end;
+
+ if (size >= 2 * slot_count)
+ return true;
+
+ if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
+ return true;
+
+ return false;
+}
+
+/*
+ * mab_no_null_split() - ensure the split doesn't fall on a NULL
+ * @b_node: the maple_big_node with the data
+ * @split: the suggested split location
+ * @slot_count: the number of slots in the node being considered.
+ *
+ * Return: the split location.
+ */
+static inline int mab_no_null_split(struct maple_big_node *b_node,
+ unsigned char split, unsigned char slot_count)
+{
+ if (!b_node->slot[split]) {
+ /*
+ * If the split is less than the max slot && the right side will
+ * still be sufficient, then increment the split on NULL.
+ */
+ if ((split < slot_count - 1) &&
+ (b_node->b_end - split) > (mt_min_slots[b_node->type]))
+ split++;
+ else
+ split--;
+ }
+ return split;
+}
+
+/*
+ * mab_calc_split() - Calculate the split location and whether there need to
+ * be two splits.
+ * @mas: The maple state
+ * @bn: The maple_big_node with the data
+ * @mid_split: The second split, if required. 0 otherwise.
+ * @min: The minimum of the range covered by the data in @bn
+ *
+ * Return: The first split location. The middle split is set in @mid_split.
+ */
+static inline int mab_calc_split(struct ma_state *mas,
+ struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
+{
+ unsigned char b_end = bn->b_end;
+ int split = b_end / 2; /* Assume equal split. */
+ unsigned char slot_min, slot_count = mt_slots[bn->type];
+
+ /*
+ * To support gap tracking, all NULL entries are kept together and a node cannot
+ * end on a NULL entry, with the exception of the left-most leaf. The
+ * limitation means that the split of a node must be checked for this condition
+ * and be able to put more data in one direction or the other.
+ */
+ if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
+ *mid_split = 0;
+ split = b_end - mt_min_slots[bn->type];
+
+ if (!ma_is_leaf(bn->type))
+ return split;
+
+ mas->mas_flags |= MA_STATE_REBALANCE;
+ if (!bn->slot[split])
+ split--;
+ return split;
+ }
+
+ /*
+ * Although extremely rare, it is possible to enter what is known as the 3-way
+ * split scenario. The 3-way split comes about by means of a store of a range
+ * that overwrites the end and beginning of two full nodes. The result is a set
+ * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can
+ * also be located in different parent nodes which are also full. This can
+ * carry upwards all the way to the root in the worst case.
+ */
+ if (unlikely(mab_middle_node(bn, split, slot_count))) {
+ split = b_end / 3;
+ *mid_split = split * 2;
+ } else {
+ slot_min = mt_min_slots[bn->type];
+
+ *mid_split = 0;
+ /*
+ * Avoid having a range less than the slot count unless it
+ * causes one node to be deficient.
+ * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
+ */
+ while (((bn->pivot[split] - min) < slot_count - 1) &&
+ (split < slot_count - 1) && (b_end - split > slot_min))
+ split++;
+ }
+
+ /* Avoid ending a node on a NULL entry */
+ split = mab_no_null_split(bn, split, slot_count);
+ if (!(*mid_split))
+ return split;
+
+ *mid_split = mab_no_null_split(bn, *mid_split, slot_count);
+
+ return split;
+}
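+
+/*
+ * Illustrative example (editorial addition, not from the original patch):
+ * assuming MAPLE_RANGE64_SLOTS is 16, a store that partially overwrites the
+ * last entry of one full leaf and the first entry of its full right sibling
+ * can gather roughly 15 + 1 + 1 + 1 + 15 = 33 entries into the big node.
+ * That no longer fits in two 16-slot nodes, so mab_calc_split() takes the
+ * 3-way case above: split becomes b_end / 3 and *mid_split twice that,
+ * producing three new nodes.
+ */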
+
+/*
+ * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
+ * and set @b_node->b_end to the next free slot.
+ * @mas: The maple state
+ * @mas_start: The starting slot to copy
+ * @mas_end: The end slot to copy (inclusively)
+ * @b_node: The maple_big_node to place the data
+ * @mab_start: The starting location in maple_big_node to store the data.
+ */
+static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
+ unsigned char mas_end, struct maple_big_node *b_node,
+ unsigned char mab_start)
+{
+ enum maple_type mt;
+ struct maple_node *node;
+ void __rcu **slots;
+ unsigned long *pivots, *gaps;
+ int i = mas_start, j = mab_start;
+ unsigned char piv_end;
+
+ node = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ pivots = ma_pivots(node, mt);
+ if (!i) {
+ b_node->pivot[j] = pivots[i++];
+ if (unlikely(i > mas_end))
+ goto complete;
+ j++;
+ }
+
+ piv_end = min(mas_end, mt_pivots[mt]);
+ for (; i < piv_end; i++, j++) {
+ b_node->pivot[j] = pivots[i];
+ if (unlikely(!b_node->pivot[j]))
+ break;
+
+ if (unlikely(mas->max == b_node->pivot[j]))
+ goto complete;
+ }
+
+ if (likely(i <= mas_end))
+ b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
+
+complete:
+ b_node->b_end = ++j;
+ j -= mab_start;
+ slots = ma_slots(node, mt);
+ memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
+ if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
+ gaps = ma_gaps(node, mt);
+ memcpy(b_node->gap + mab_start, gaps + mas_start,
+ sizeof(unsigned long) * j);
+ }
+}
+
+/*
+ * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
+ * @mas: The maple state
+ * @node: The maple node
+ * @pivots: pointer to the maple node pivots
+ * @mt: The maple type
+ * @end: The assumed end
+ *
+ * Note, end may be incremented within this function but not modified at the
+ * source. This is fine since the metadata is the last thing to be stored in a
+ * node during a write.
+ */
+static inline void mas_leaf_set_meta(struct ma_state *mas,
+ struct maple_node *node, unsigned long *pivots,
+ enum maple_type mt, unsigned char end)
+{
+ /* There is no room for metadata already */
+ if (mt_pivots[mt] <= end)
+ return;
+
+ if (pivots[end] && pivots[end] < mas->max)
+ end++;
+
+ if (end < mt_slots[mt] - 1)
+ ma_set_meta(node, mt, 0, end);
+}
+
+/*
+ * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
+ * @b_node: the maple_big_node that has the data
+ * @mab_start: the start location in @b_node.
+ * @mab_end: The end location in @b_node (inclusively)
+ * @mas: The maple state with the maple encoded node.
+ */
+static inline void mab_mas_cp(struct maple_big_node *b_node,
+ unsigned char mab_start, unsigned char mab_end,
+ struct ma_state *mas, bool new_max)
+{
+ int i, j = 0;
+ enum maple_type mt = mte_node_type(mas->node);
+ struct maple_node *node = mte_to_node(mas->node);
+ void __rcu **slots = ma_slots(node, mt);
+ unsigned long *pivots = ma_pivots(node, mt);
+ unsigned long *gaps = NULL;
+ unsigned char end;
+
+ if (mab_end - mab_start > mt_pivots[mt])
+ mab_end--;
+
+ if (!pivots[mt_pivots[mt] - 1])
+ slots[mt_pivots[mt]] = NULL;
+
+ i = mab_start;
+ do {
+ pivots[j++] = b_node->pivot[i++];
+ } while (i <= mab_end && likely(b_node->pivot[i]));
+
+ memcpy(slots, b_node->slot + mab_start,
+ sizeof(void *) * (i - mab_start));
+
+ if (new_max)
+ mas->max = b_node->pivot[i - 1];
+
+ end = j - 1;
+ if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
+ unsigned long max_gap = 0;
+ unsigned char offset = 15;
+
+ gaps = ma_gaps(node, mt);
+ do {
+ gaps[--j] = b_node->gap[--i];
+ if (gaps[j] > max_gap) {
+ offset = j;
+ max_gap = gaps[j];
+ }
+ } while (j);
+
+ ma_set_meta(node, mt, offset, end);
+ } else {
+ mas_leaf_set_meta(mas, node, pivots, mt, end);
+ }
+}
+
+/*
+ * mas_descend_adopt() - Descend through a sub-tree and adopt children.
+ * @mas: the maple state with the maple encoded node of the sub-tree.
+ *
+ * Descend through a sub-tree and adopt children that do not have the correct
+ * parent set. Follow the nodes whose parent is already correct, as these are
+ * the new entries that must be walked to find any further children with
+ * incorrectly set parents.
+ */
+static inline void mas_descend_adopt(struct ma_state *mas)
+{
+ struct ma_state list[3], next[3];
+ int i, n;
+
+ /*
+ * At each level there may be up to 3 correct parent pointers which indicates
+ * the new nodes which need to be walked to find any new nodes at a lower level.
+ */
+
+ for (i = 0; i < 3; i++) {
+ list[i] = *mas;
+ list[i].offset = 0;
+ next[i].offset = 0;
+ }
+ next[0] = *mas;
+
+ while (!mte_is_leaf(list[0].node)) {
+ n = 0;
+ for (i = 0; i < 3; i++) {
+ if (mas_is_none(&list[i]))
+ continue;
+
+ if (i && list[i-1].node == list[i].node)
+ continue;
+
+ while ((n < 3) && (mas_new_child(&list[i], &next[n])))
+ n++;
+
+ mas_adopt_children(&list[i], list[i].node);
+ }
+
+ while (n < 3)
+ next[n++].node = MAS_NONE;
+
+ /* descend by setting the list to the children */
+ for (i = 0; i < 3; i++)
+ list[i] = next[i];
+ }
+}
+
+/*
+ * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
+ * @mas: The maple state
+ * @end: The maple node end
+ * @mt: The maple node type
+ */
+static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
+ enum maple_type mt)
+{
+ if (!(mas->mas_flags & MA_STATE_BULK))
+ return;
+
+ if (mte_is_root(mas->node))
+ return;
+
+ if (end > mt_min_slots[mt]) {
+ mas->mas_flags &= ~MA_STATE_REBALANCE;
+ return;
+ }
+}
+
+/*
+ * mas_store_b_node() - Store an @entry into the b_node while also copying the
+ * data from a maple encoded node.
+ * @wr_mas: the maple write state
+ * @b_node: the maple_big_node to fill with data
+ * @offset_end: the offset to end copying
+ *
+ * Note: The new end of the data is stored in @b_node->b_end.
+ */
+static inline void mas_store_b_node(struct ma_wr_state *wr_mas,
+ struct maple_big_node *b_node, unsigned char offset_end)
+{
+ unsigned char slot;
+ unsigned char b_end;
+ /* Possible underflow of piv will wrap back to 0 before use. */
+ unsigned long piv;
+ struct ma_state *mas = wr_mas->mas;
+
+ b_node->type = wr_mas->type;
+ b_end = 0;
+ slot = mas->offset;
+ if (slot) {
+ /* Copy start data up to insert. */
+ mas_mab_cp(mas, 0, slot - 1, b_node, 0);
+ b_end = b_node->b_end;
+ piv = b_node->pivot[b_end - 1];
+ } else
+ piv = mas->min - 1;
+
+ if (piv + 1 < mas->index) {
+ /* Handle range starting after old range */
+ b_node->slot[b_end] = wr_mas->content;
+ if (!wr_mas->content)
+ b_node->gap[b_end] = mas->index - 1 - piv;
+ b_node->pivot[b_end++] = mas->index - 1;
+ }
+
+ /* Store the new entry. */
+ mas->offset = b_end;
+ b_node->slot[b_end] = wr_mas->entry;
+ b_node->pivot[b_end] = mas->last;
+
+ /* Appended. */
+ if (mas->last >= mas->max)
+ goto b_end;
+
+ /* Handle new range ending before old range ends */
+ piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
+ if (piv > mas->last) {
+ if (piv == ULONG_MAX)
+ mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
+
+ if (offset_end != slot)
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ offset_end);
+
+ b_node->slot[++b_end] = wr_mas->content;
+ if (!wr_mas->content)
+ b_node->gap[b_end] = piv - mas->last + 1;
+ b_node->pivot[b_end] = piv;
+ }
+
+ slot = offset_end + 1;
+ if (slot > wr_mas->node_end)
+ goto b_end;
+
+ /* Copy end data to the end of the node. */
+ mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
+ b_node->b_end--;
+ return;
+
+b_end:
+ b_node->b_end = b_end;
+}
+
+/*
+ * mas_prev_sibling() - Find the previous node with the same parent.
+ * @mas: the maple state
+ *
+ * Return: True if there is a previous sibling, false otherwise.
+ */
+static inline bool mas_prev_sibling(struct ma_state *mas)
+{
+ unsigned int p_slot = mte_parent_slot(mas->node);
+
+ if (mte_is_root(mas->node))
+ return false;
+
+ if (!p_slot)
+ return false;
+
+ mas_ascend(mas);
+ mas->offset = p_slot - 1;
+ mas_descend(mas);
+ return true;
+}
+
+/*
+ * mas_next_sibling() - Find the next node with the same parent.
+ * @mas: the maple state
+ *
+ * Return: true if there is a next sibling, false otherwise.
+ */
+static inline bool mas_next_sibling(struct ma_state *mas)
+{
+ MA_STATE(parent, mas->tree, mas->index, mas->last);
+
+ if (mte_is_root(mas->node))
+ return false;
+
+ parent = *mas;
+ mas_ascend(&parent);
+ parent.offset = mte_parent_slot(mas->node) + 1;
+ if (parent.offset > mas_data_end(&parent))
+ return false;
+
+ *mas = parent;
+ mas_descend(mas);
+ return true;
+}
+
+/*
+ * mte_node_or_none() - Return the encoded node or MAS_NONE.
+ * @enode: The encoded maple node.
+ *
+ * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
+ *
+ * Return: @enode or MAS_NONE
+ */
+static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
+{
+ if (enode)
+ return enode;
+
+ return ma_enode_ptr(MAS_NONE);
+}
+
+/*
+ * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
+ * @wr_mas: The maple write state
+ *
+ * Uses mas_slot_locked() and does not need to worry about dead nodes.
+ */
+static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char count;
+ unsigned char offset;
+ unsigned long index, min, max;
+
+ if (unlikely(ma_is_dense(wr_mas->type))) {
+ wr_mas->r_max = wr_mas->r_min = mas->index;
+ mas->offset = mas->index = mas->min;
+ return;
+ }
+
+ wr_mas->node = mas_mn(wr_mas->mas);
+ wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
+ count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
+ wr_mas->pivots, mas->max);
+ offset = mas->offset;
+ min = mas_safe_min(mas, wr_mas->pivots, offset);
+ if (unlikely(offset == count))
+ goto max;
+
+ max = wr_mas->pivots[offset];
+ index = mas->index;
+ if (unlikely(index <= max))
+ goto done;
+
+ if (unlikely(!max && offset))
+ goto max;
+
+ min = max + 1;
+ while (++offset < count) {
+ max = wr_mas->pivots[offset];
+ if (index <= max)
+ goto done;
+ else if (unlikely(!max))
+ break;
+
+ min = max + 1;
+ }
+
+max:
+ max = mas->max;
+done:
+ wr_mas->r_max = max;
+ wr_mas->r_min = min;
+ wr_mas->offset_end = mas->offset = offset;
+}
+
+/*
+ * mas_topiary_range() - Add a range of slots to the topiary.
+ * @mas: The maple state
+ * @destroy: The topiary to add the slots to (usually destroy)
+ * @start: The starting slot (inclusive)
+ * @end: The end slot (inclusive)
+ */
+static inline void mas_topiary_range(struct ma_state *mas,
+ struct ma_topiary *destroy, unsigned char start, unsigned char end)
+{
+ void __rcu **slots;
+ unsigned char offset;
+
+ MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
+ slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
+ for (offset = start; offset <= end; offset++) {
+ struct maple_enode *enode = mas_slot_locked(mas, slots, offset);
+
+ if (mte_dead_node(enode))
+ continue;
+
+ mat_add(destroy, enode);
+ }
+}
+
+/*
+ * mast_topiary() - Add the portions of the tree to the removal list; either to
+ * be freed or discarded (destroy walk).
+ * @mast: The maple_subtree_state.
+ */
+static inline void mast_topiary(struct maple_subtree_state *mast)
+{
+ MA_WR_STATE(wr_mas, mast->orig_l, NULL);
+ unsigned char r_start, r_end;
+ unsigned char l_start, l_end;
+ void __rcu **l_slots, **r_slots;
+
+ wr_mas.type = mte_node_type(mast->orig_l->node);
+ mast->orig_l->index = mast->orig_l->last;
+ mas_wr_node_walk(&wr_mas);
+ l_start = mast->orig_l->offset + 1;
+ l_end = mas_data_end(mast->orig_l);
+ r_start = 0;
+ r_end = mast->orig_r->offset;
+
+ if (r_end)
+ r_end--;
+
+ l_slots = ma_slots(mas_mn(mast->orig_l),
+ mte_node_type(mast->orig_l->node));
+
+ r_slots = ma_slots(mas_mn(mast->orig_r),
+ mte_node_type(mast->orig_r->node));
+
+ if ((l_start < l_end) &&
+ mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
+ l_start++;
+ }
+
+ if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
+ if (r_end)
+ r_end--;
+ }
+
+ if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
+ return;
+
+ /* At the node where left and right sides meet, add the parts between */
+ if (mast->orig_l->node == mast->orig_r->node) {
+ return mas_topiary_range(mast->orig_l, mast->destroy,
+ l_start, r_end);
+ }
+
+ /* mast->orig_r is different and consumed. */
+ if (mte_is_leaf(mast->orig_r->node))
+ return;
+
+ if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
+ l_end--;
+
+
+ if (l_start <= l_end)
+ mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);
+
+ if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
+ r_start++;
+
+ if (r_start <= r_end)
+ mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
+}
+
+/*
+ * mast_rebalance_next() - Rebalance against the next node
+ * @mast: The maple subtree state
+ *
+ * Copy the data from the node to the right (the next node) into @mast->bn.
+ */
+static inline void mast_rebalance_next(struct maple_subtree_state *mast)
+{
+ unsigned char b_end = mast->bn->b_end;
+
+ mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
+ mast->bn, b_end);
+ mast->orig_r->last = mast->orig_r->max;
+}
+
+/*
+ * mast_rebalance_prev() - Rebalance against the previous node
+ * @mast: The maple subtree state
+ *
+ * Shift @mast->bn right and copy the data from the node to the left (the
+ * previous node) into the front of @mast->bn.
+ */
+static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
+{
+ unsigned char end = mas_data_end(mast->orig_l) + 1;
+ unsigned char b_end = mast->bn->b_end;
+
+ mab_shift_right(mast->bn, end);
+ mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
+ mast->l->min = mast->orig_l->min;
+ mast->orig_l->index = mast->orig_l->min;
+ mast->bn->b_end = end + b_end;
+ mast->l->offset += end;
+}
+
+/*
+ * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
+ * the node to the right. Check the nodes to the right, then to the left, at
+ * each level upwards until the root is reached. Free and destroy as needed.
+ * Data is copied into @mast->bn.
+ * @mast: The maple_subtree_state.
+ *
+ * Return: true if a neighbour was found and rebalanced against, false if the
+ * root was reached first.
+ */
+static inline
+bool mast_spanning_rebalance(struct maple_subtree_state *mast)
+{
+ struct ma_state r_tmp = *mast->orig_r;
+ struct ma_state l_tmp = *mast->orig_l;
+ struct maple_enode *ancestor = NULL;
+ unsigned char start, end;
+ unsigned char depth = 0;
+
+ r_tmp = *mast->orig_r;
+ l_tmp = *mast->orig_l;
+ do {
+ mas_ascend(mast->orig_r);
+ mas_ascend(mast->orig_l);
+ depth++;
+ if (!ancestor &&
+ (mast->orig_r->node == mast->orig_l->node)) {
+ ancestor = mast->orig_r->node;
+ end = mast->orig_r->offset - 1;
+ start = mast->orig_l->offset + 1;
+ }
+
+ if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
+ if (!ancestor) {
+ ancestor = mast->orig_r->node;
+ start = 0;
+ }
+
+ mast->orig_r->offset++;
+ do {
+ mas_descend(mast->orig_r);
+ mast->orig_r->offset = 0;
+ depth--;
+ } while (depth);
+
+ mast_rebalance_next(mast);
+ do {
+ unsigned char l_off = 0;
+ struct maple_enode *child = r_tmp.node;
+
+ mas_ascend(&r_tmp);
+ if (ancestor == r_tmp.node)
+ l_off = start;
+
+ if (r_tmp.offset)
+ r_tmp.offset--;
+
+ if (l_off < r_tmp.offset)
+ mas_topiary_range(&r_tmp, mast->destroy,
+ l_off, r_tmp.offset);
+
+ if (l_tmp.node != child)
+ mat_add(mast->free, child);
+
+ } while (r_tmp.node != ancestor);
+
+ *mast->orig_l = l_tmp;
+ return true;
+
+ } else if (mast->orig_l->offset != 0) {
+ if (!ancestor) {
+ ancestor = mast->orig_l->node;
+ end = mas_data_end(mast->orig_l);
+ }
+
+ mast->orig_l->offset--;
+ do {
+ mas_descend(mast->orig_l);
+ mast->orig_l->offset =
+ mas_data_end(mast->orig_l);
+ depth--;
+ } while (depth);
+
+ mast_rebalance_prev(mast);
+ do {
+ unsigned char r_off;
+ struct maple_enode *child = l_tmp.node;
+
+ mas_ascend(&l_tmp);
+ if (ancestor == l_tmp.node)
+ r_off = end;
+ else
+ r_off = mas_data_end(&l_tmp);
+
+ if (l_tmp.offset < r_off)
+ l_tmp.offset++;
+
+ if (l_tmp.offset < r_off)
+ mas_topiary_range(&l_tmp, mast->destroy,
+ l_tmp.offset, r_off);
+
+ if (r_tmp.node != child)
+ mat_add(mast->free, child);
+
+ } while (l_tmp.node != ancestor);
+
+ *mast->orig_r = r_tmp;
+ return true;
+ }
+ } while (!mte_is_root(mast->orig_r->node));
+
+ *mast->orig_r = r_tmp;
+ *mast->orig_l = l_tmp;
+ return false;
+}
+
+/*
+ * mast_ascend_free() - Add current original maple state nodes to the free list
+ * and ascend.
+ * @mast: the maple subtree state.
+ *
+ * Ascend the original left and right sides and add the previous nodes to the
+ * free list. Set the slots to point to the correct location in the new nodes.
+ */
+static inline void
+mast_ascend_free(struct maple_subtree_state *mast)
+{
+ MA_WR_STATE(wr_mas, mast->orig_r, NULL);
+ struct maple_enode *left = mast->orig_l->node;
+ struct maple_enode *right = mast->orig_r->node;
+
+ mas_ascend(mast->orig_l);
+ mas_ascend(mast->orig_r);
+ mat_add(mast->free, left);
+
+ if (left != right)
+ mat_add(mast->free, right);
+
+ mast->orig_r->offset = 0;
+ mast->orig_r->index = mast->r->max;
+ /* last should be larger than or equal to index */
+ if (mast->orig_r->last < mast->orig_r->index)
+ mast->orig_r->last = mast->orig_r->index;
+ /*
+ * The node may not contain the value so set slot to ensure all
+ * of the node's contents are freed or destroyed.
+ */
+ wr_mas.type = mte_node_type(mast->orig_r->node);
+ mas_wr_node_walk(&wr_mas);
+ /* Set up the left side of things */
+ mast->orig_l->offset = 0;
+ mast->orig_l->index = mast->l->min;
+ wr_mas.mas = mast->orig_l;
+ wr_mas.type = mte_node_type(mast->orig_l->node);
+ mas_wr_node_walk(&wr_mas);
+
+ mast->bn->type = wr_mas.type;
+}
+
+/*
+ * mas_new_ma_node() - Create and return a new maple node. Helper function.
+ * @mas: the maple state with the allocations.
+ * @b_node: the maple_big_node with the type encoding.
+ *
+ * Use the node type from the maple_big_node to allocate a new node from the
+ * ma_state. This function exists mainly for code readability.
+ *
+ * Return: A new maple encoded node
+ */
+static inline struct maple_enode
+*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
+{
+ return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
+}
+
+/*
+ * mas_mab_to_node() - Set up right and middle nodes
+ *
+ * @mas: the maple state that contains the allocations.
+ * @b_node: the node which contains the data.
+ * @left: The pointer which will have the left node
+ * @right: The pointer which may have the right node
+ * @middle: the pointer which may have the middle node (rare)
+ * @mid_split: the split location for the middle node
+ * @min: the minimum value passed through to mab_calc_split()
+ *
+ * Return: the split location of the left node.
+ */
+static inline unsigned char mas_mab_to_node(struct ma_state *mas,
+ struct maple_big_node *b_node, struct maple_enode **left,
+ struct maple_enode **right, struct maple_enode **middle,
+ unsigned char *mid_split, unsigned long min)
+{
+ unsigned char split = 0;
+ unsigned char slot_count = mt_slots[b_node->type];
+
+ *left = mas_new_ma_node(mas, b_node);
+ *right = NULL;
+ *middle = NULL;
+ *mid_split = 0;
+
+ if (b_node->b_end < slot_count) {
+ split = b_node->b_end;
+ } else {
+ split = mab_calc_split(mas, b_node, mid_split, min);
+ *right = mas_new_ma_node(mas, b_node);
+ }
+
+ if (*mid_split)
+ *middle = mas_new_ma_node(mas, b_node);
+
+ return split;
+
+}
+
+/*
+ * mab_set_b_end() - Add an entry to @b_node at b_node->b_end and increment
+ * b_end.
+ * @b_node - the big node to add the entry to
+ * @mas - the maple state to get the pivot from (mas->max)
+ * @entry - the entry to add; if NULL nothing happens.
+ */
+static inline void mab_set_b_end(struct maple_big_node *b_node,
+ struct ma_state *mas,
+ void *entry)
+{
+ if (!entry)
+ return;
+
+ b_node->slot[b_node->b_end] = entry;
+ if (mt_is_alloc(mas->tree))
+ b_node->gap[b_node->b_end] = mas_max_gap(mas);
+ b_node->pivot[b_node->b_end++] = mas->max;
+}
+
+/*
+ * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
+ * of @mas->node to either @left or @right, depending on @slot and @split
+ *
+ * @mas - the maple state with the node that needs a parent
+ * @left - possible parent 1
+ * @right - possible parent 2
+ * @slot - the slot where mas->node was placed
+ * @split - the split location between @left and @right
+ */
+static inline void mas_set_split_parent(struct ma_state *mas,
+ struct maple_enode *left,
+ struct maple_enode *right,
+ unsigned char *slot, unsigned char split)
+{
+ if (mas_is_none(mas))
+ return;
+
+ if ((*slot) <= split)
+ mte_set_parent(mas->node, left, *slot);
+ else if (right)
+ mte_set_parent(mas->node, right, (*slot) - split - 1);
+
+ (*slot)++;
+}
+
+/*
+ * mte_mid_split_check() - Check if the next node passes the mid-split
+ * @l: Pointer to the left encoded maple node.
+ * @r: Pointer to the right encoded maple node.
+ * @right: The encoded maple node to use once @slot passes @mid_split.
+ * @slot: The offset being placed.
+ * @split: Pointer to the split location.
+ * @mid_split: The middle split location.
+ */
+static inline void mte_mid_split_check(struct maple_enode **l,
+ struct maple_enode **r,
+ struct maple_enode *right,
+ unsigned char slot,
+ unsigned char *split,
+ unsigned char mid_split)
+{
+ if (*r == right)
+ return;
+
+ if (slot < mid_split)
+ return;
+
+ *l = *r;
+ *r = right;
+ *split = mid_split;
+}
+
+/*
+ * mast_set_split_parents() - Helper function to set three nodes' parents. Slot
+ * is taken from @mast->l.
+ * @mast - the maple subtree state
+ * @left - the left node
+ * @middle - the middle node (may be NULL)
+ * @right - the right node
+ * @split - the split location
+ * @mid_split - the middle split location
+ */
+static inline void mast_set_split_parents(struct maple_subtree_state *mast,
+ struct maple_enode *left,
+ struct maple_enode *middle,
+ struct maple_enode *right,
+ unsigned char split,
+ unsigned char mid_split)
+{
+ unsigned char slot;
+ struct maple_enode *l = left;
+ struct maple_enode *r = right;
+
+ if (mas_is_none(mast->l))
+ return;
+
+ if (middle)
+ r = middle;
+
+ slot = mast->l->offset;
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ mas_set_split_parent(mast->l, l, r, &slot, split);
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ mas_set_split_parent(mast->m, l, r, &slot, split);
+
+ mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
+ mas_set_split_parent(mast->r, l, r, &slot, split);
+}
+
+/*
+ * mas_wmb_replace() - Write memory barrier and replace
+ * @mas: The maple state
+ * @free: the maple topiary list of nodes to free
+ * @destroy: The maple topiary list of nodes to destroy (walk and free)
+ *
+ * Updates gap as necessary.
+ */
+static inline void mas_wmb_replace(struct ma_state *mas,
+ struct ma_topiary *free,
+ struct ma_topiary *destroy)
+{
+ /* All nodes must see old data as dead prior to replacing that data */
+ smp_wmb(); /* Needed for RCU */
+
+ /* Insert the new data in the tree */
+ mas_replace(mas, true);
+
+ if (!mte_is_leaf(mas->node))
+ mas_descend_adopt(mas);
+
+ mas_mat_free(mas, free);
+
+ if (destroy)
+ mas_mat_destroy(mas, destroy);
+
+ if (mte_is_leaf(mas->node))
+ return;
+
+ mas_update_gap(mas);
+}
+
+/*
+ * mast_new_root() - Set a new tree root during subtree creation
+ * @mast: The maple subtree state
+ * @mas: The maple state
+ */
+static inline void mast_new_root(struct maple_subtree_state *mast,
+ struct ma_state *mas)
+{
+ mas_mn(mast->l)->parent =
+ ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
+ if (!mte_dead_node(mast->orig_l->node) &&
+ !mte_is_root(mast->orig_l->node)) {
+ do {
+ mast_ascend_free(mast);
+ mast_topiary(mast);
+ } while (!mte_is_root(mast->orig_l->node));
+ }
+ if ((mast->orig_l->node != mas->node) &&
+ (mast->l->depth > mas_mt_height(mas))) {
+ mat_add(mast->free, mas->node);
+ }
+}
+
+/*
+ * mast_cp_to_nodes() - Copy data out to nodes.
+ * @mast: The maple subtree state
+ * @left: The left encoded maple node
+ * @middle: The middle encoded maple node
+ * @right: The right encoded maple node
+ * @split: The location to split between left and (middle ? middle : right)
+ * @mid_split: The location to split between middle and right.
+ */
+static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
+ struct maple_enode *left, struct maple_enode *middle,
+ struct maple_enode *right, unsigned char split, unsigned char mid_split)
+{
+ bool new_lmax = true;
+
+ mast->l->node = mte_node_or_none(left);
+ mast->m->node = mte_node_or_none(middle);
+ mast->r->node = mte_node_or_none(right);
+
+ mast->l->min = mast->orig_l->min;
+ if (split == mast->bn->b_end) {
+ mast->l->max = mast->orig_r->max;
+ new_lmax = false;
+ }
+
+ mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
+
+ if (middle) {
+ mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
+ mast->m->min = mast->bn->pivot[split] + 1;
+ split = mid_split;
+ }
+
+ mast->r->max = mast->orig_r->max;
+ if (right) {
+ mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
+ mast->r->min = mast->bn->pivot[split] + 1;
+ }
+}
+
+/*
+ * mast_combine_cp_left() - Copy the original left side of the tree into the
+ * combined data set in the maple subtree state big node.
+ * @mast: The maple subtree state
+ */
+static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
+{
+ unsigned char l_slot = mast->orig_l->offset;
+
+ if (!l_slot)
+ return;
+
+ mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
+}
+
+/*
+ * mast_combine_cp_right() - Copy the original right side of the tree into the
+ * combined data set in the maple subtree state big node.
+ * @mast: The maple subtree state
+ */
+static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
+{
+ if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
+ return;
+
+ mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
+ mt_slot_count(mast->orig_r->node), mast->bn,
+ mast->bn->b_end);
+ mast->orig_r->last = mast->orig_r->max;
+}
+
+/*
+ * mast_sufficient() - Check if the maple subtree state has enough data in the big
+ * node to create at least one sufficient node
+ * @mast: the maple subtree state
+ */
+static inline bool mast_sufficient(struct maple_subtree_state *mast)
+{
+ if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
+ return true;
+
+ return false;
+}
+
+/*
+ * mast_overflow() - Check if there is too much data in the subtree state for a
+ * single node.
+ * @mast: The maple subtree state
+ */
+static inline bool mast_overflow(struct maple_subtree_state *mast)
+{
+ if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
+ return true;
+
+ return false;
+}
+
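+/*
+ * mtree_range_walk() - Walk the tree to the leaf containing @mas->index.
+ * @mas: The maple state
+ *
+ * On success, @mas points to the leaf and mas->index/mas->last are set to the
+ * range of the slot that was found.
+ *
+ * Return: The entry at mas->index, or %NULL if a dead node was encountered
+ * (the maple state is reset).
+ */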
+static inline void *mtree_range_walk(struct ma_state *mas)
+{
+ unsigned long *pivots;
+ unsigned char offset;
+ struct maple_node *node;
+ struct maple_enode *next, *last;
+ enum maple_type type;
+ void __rcu **slots;
+ unsigned char end;
+ unsigned long max, min;
+ unsigned long prev_max, prev_min;
+
+ last = next = mas->node;
+ prev_min = min = mas->min;
+ max = mas->max;
+ do {
+ offset = 0;
+ last = next;
+ node = mte_to_node(next);
+ type = mte_node_type(next);
+ pivots = ma_pivots(node, type);
+ end = ma_data_end(node, type, pivots, max);
+ if (unlikely(ma_dead_node(node)))
+ goto dead_node;
+
+ if (pivots[offset] >= mas->index) {
+ prev_max = max;
+ prev_min = min;
+ max = pivots[offset];
+ goto next;
+ }
+
+ do {
+ offset++;
+ } while ((offset < end) && (pivots[offset] < mas->index));
+
+ prev_min = min;
+ min = pivots[offset - 1] + 1;
+ prev_max = max;
+ if (likely(offset < end && pivots[offset]))
+ max = pivots[offset];
+
+next:
+ slots = ma_slots(node, type);
+ next = mt_slot(mas->tree, slots, offset);
+ if (unlikely(ma_dead_node(node)))
+ goto dead_node;
+ } while (!ma_is_leaf(type));
+
+ mas->offset = offset;
+ mas->index = min;
+ mas->last = max;
+ mas->min = prev_min;
+ mas->max = prev_max;
+ mas->node = last;
+ return (void *) next;
+
+dead_node:
+ mas_reset(mas);
+ return NULL;
+}
+
+/*
+ * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
+ * @mas: The starting maple state
+ * @mast: The maple_subtree_state, keeps track of 4 maple states.
+ * @count: The estimated count of iterations needed.
+ *
+ * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
+ * is hit. First @b_node is split into two entries which are inserted into the
+ * next iteration of the loop. @b_node is returned populated with the final
+ * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
+ * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
+ * to account for what has been copied into the new sub-tree. The update of
+ * orig_l_mas->last is used in mas_consume to find the slots that will need to
+ * be either freed or destroyed. orig_l_mas->depth keeps track of the height of
+ * the new sub-tree in case the sub-tree becomes the full tree.
+ *
+ * Return: the number of elements in b_node during the last loop.
+ */
+static int mas_spanning_rebalance(struct ma_state *mas,
+ struct maple_subtree_state *mast, unsigned char count)
+{
+ unsigned char split, mid_split;
+ unsigned char slot = 0;
+ struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
+
+ MA_STATE(l_mas, mas->tree, mas->index, mas->index);
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(m_mas, mas->tree, mas->index, mas->index);
+ MA_TOPIARY(free, mas->tree);
+ MA_TOPIARY(destroy, mas->tree);
+
+ /*
+ * The tree needs to be rebalanced and leaves need to be kept at the same level.
+ * Rebalancing is done by use of the ``struct ma_topiary``.
+ */
+ mast->l = &l_mas;
+ mast->m = &m_mas;
+ mast->r = &r_mas;
+ mast->free = &free;
+ mast->destroy = &destroy;
+ l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
+ if (!(mast->orig_l->min && mast->orig_r->max == ULONG_MAX) &&
+ unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
+ mast_spanning_rebalance(mast);
+
+ mast->orig_l->depth = 0;
+
+ /*
+ * Each level of the tree is examined and balanced, pushing data to the left or
+ * right, or rebalancing against left or right nodes is employed to avoid
+ * rippling up the tree to limit the amount of churn. Once a new sub-section of
+ * the tree is created, there may be a mix of new and old nodes. The old nodes
+ * will have the incorrect parent pointers and currently be in two trees: the
+ * original tree and the partially new tree. To remedy the parent pointers in
+ * the old tree, the new data is swapped into the active tree and a walk down
+ * the tree is performed and the parent pointers are updated.
+ * See mas_descend_adopt() for more information.
+ */
+ while (count--) {
+ mast->bn->b_end--;
+ mast->bn->type = mte_node_type(mast->orig_l->node);
+ split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
+ &mid_split, mast->orig_l->min);
+ mast_set_split_parents(mast, left, middle, right, split,
+ mid_split);
+ mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
+
+ /*
+ * Copy data from next level in the tree to mast->bn from next
+ * iteration
+ */
+ memset(mast->bn, 0, sizeof(struct maple_big_node));
+ mast->bn->type = mte_node_type(left);
+ mast->orig_l->depth++;
+
+ /* Root already stored in l->node. */
+ if (mas_is_root_limits(mast->l))
+ goto new_root;
+
+ mast_ascend_free(mast);
+ mast_combine_cp_left(mast);
+ l_mas.offset = mast->bn->b_end;
+ mab_set_b_end(mast->bn, &l_mas, left);
+ mab_set_b_end(mast->bn, &m_mas, middle);
+ mab_set_b_end(mast->bn, &r_mas, right);
+
+ /* Copy anything necessary out of the right node. */
+ mast_combine_cp_right(mast);
+ mast_topiary(mast);
+ mast->orig_l->last = mast->orig_l->max;
+
+ if (mast_sufficient(mast))
+ continue;
+
+ if (mast_overflow(mast))
+ continue;
+
+ /* May be a new root stored in mast->bn */
+ if (mas_is_root_limits(mast->orig_l))
+ break;
+
+ mast_spanning_rebalance(mast);
+
+ /* rebalancing from other nodes may require another loop. */
+ if (!count)
+ count++;
+ }
+
+ l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
+ mte_node_type(mast->orig_l->node));
+ mast->orig_l->depth++;
+ mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
+ mte_set_parent(left, l_mas.node, slot);
+ if (middle)
+ mte_set_parent(middle, l_mas.node, ++slot);
+
+ if (right)
+ mte_set_parent(right, l_mas.node, ++slot);
+
+ if (mas_is_root_limits(mast->l)) {
+new_root:
+ mast_new_root(mast, mas);
+ } else {
+ mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
+ }
+
+ if (!mte_dead_node(mast->orig_l->node))
+ mat_add(&free, mast->orig_l->node);
+
+ mas->depth = mast->orig_l->depth;
+ *mast->orig_l = l_mas;
+ mte_set_node_dead(mas->node);
+
+ /* Set up mas for insertion. */
+ mast->orig_l->depth = mas->depth;
+ mast->orig_l->alloc = mas->alloc;
+ *mas = *mast->orig_l;
+ mas_wmb_replace(mas, &free, &destroy);
+ mtree_range_walk(mas);
+ return mast->bn->b_end;
+}
+
+/*
+ * mas_rebalance() - Rebalance a given node.
+ * @mas: The maple state
+ * @b_node: The big maple node.
+ *
+ * Rebalance two nodes into a single node or two new nodes that are sufficient.
+ * Continue upwards until tree is sufficient.
+ *
+ * Return: the number of elements in b_node during the last loop.
+ */
+static inline int mas_rebalance(struct ma_state *mas,
+ struct maple_big_node *b_node)
+{
+ char empty_count = mas_mt_height(mas);
+ struct maple_subtree_state mast;
+ unsigned char shift, b_end = ++b_node->b_end;
+
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+
+ trace_ma_op(__func__, mas);
+
+ /*
+ * Rebalancing occurs if a node is insufficient. Data is rebalanced
+ * against the node to the right if it exists, otherwise the node to the
+ * left of this node is rebalanced against this node. If rebalancing
+ * causes just one node to be produced instead of two, then the parent
+ * is also examined and rebalanced if it is insufficient. Every level
+ * tries to combine the data in the same way. If one node contains the
+ * entire range of the tree, then that node is used as a new root node.
+ */
+ mas_node_count(mas, 1 + empty_count * 3);
+ if (mas_is_err(mas))
+ return 0;
+
+ mast.orig_l = &l_mas;
+ mast.orig_r = &r_mas;
+ mast.bn = b_node;
+ mast.bn->type = mte_node_type(mas->node);
+
+ l_mas = r_mas = *mas;
+
+ if (mas_next_sibling(&r_mas)) {
+ mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
+ r_mas.last = r_mas.index = r_mas.max;
+ } else {
+ mas_prev_sibling(&l_mas);
+ shift = mas_data_end(&l_mas) + 1;
+ mab_shift_right(b_node, shift);
+ mas->offset += shift;
+ mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
+ b_node->b_end = shift + b_end;
+ l_mas.index = l_mas.last = l_mas.min;
+ }
+
+ return mas_spanning_rebalance(mas, &mast, empty_count);
+}
+
+/*
+ * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
+ * state.
+ * @mas: The maple state
+ * @end: The end of the left-most node.
+ *
+ * During a mass-insert event (such as forking), it may be necessary to
+ * rebalance the left-most node when it is not sufficient.
+ */
+static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
+{
+ enum maple_type mt = mte_node_type(mas->node);
+ struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
+ struct maple_enode *eparent;
+ unsigned char offset, tmp, split = mt_slots[mt] / 2;
+ void __rcu **l_slots, **slots;
+ unsigned long *l_pivs, *pivs, gap;
+ bool in_rcu = mt_in_rcu(mas->tree);
+
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last);
+
+ l_mas = *mas;
+ mas_prev_sibling(&l_mas);
+
+ /* set up node. */
+ if (in_rcu) {
+ /* Allocate for both left and right as well as parent. */
+ mas_node_count(mas, 3);
+ if (mas_is_err(mas))
+ return;
+
+ newnode = mas_pop_node(mas);
+ } else {
+ newnode = &reuse;
+ }
+
+ node = mas_mn(mas);
+ newnode->parent = node->parent;
+ slots = ma_slots(newnode, mt);
+ pivs = ma_pivots(newnode, mt);
+ left = mas_mn(&l_mas);
+ l_slots = ma_slots(left, mt);
+ l_pivs = ma_pivots(left, mt);
+ if (!l_slots[split])
+ split++;
+ tmp = mas_data_end(&l_mas) - split;
+
+ memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
+ memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
+ pivs[tmp] = l_mas.max;
+ memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
+ memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
+
+ l_mas.max = l_pivs[split];
+ mas->min = l_mas.max + 1;
+ eparent = mt_mk_node(mte_parent(l_mas.node),
+ mas_parent_enum(&l_mas, l_mas.node));
+ tmp += end;
+ if (!in_rcu) {
+ unsigned char max_p = mt_pivots[mt];
+ unsigned char max_s = mt_slots[mt];
+
+ if (tmp < max_p)
+ memset(pivs + tmp, 0,
+ sizeof(unsigned long *) * (max_p - tmp));
+
+ if (tmp < mt_slots[mt])
+ memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
+
+ memcpy(node, newnode, sizeof(struct maple_node));
+ ma_set_meta(node, mt, 0, tmp - 1);
+ mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
+ l_pivs[split]);
+
+ /* Remove data from l_pivs. */
+ tmp = split + 1;
+ memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
+ memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
+ ma_set_meta(left, mt, 0, split);
+
+ goto done;
+ }
+
+ /* RCU requires replacing both l_mas, mas, and parent. */
+ mas->node = mt_mk_node(newnode, mt);
+ ma_set_meta(newnode, mt, 0, tmp);
+
+ new_left = mas_pop_node(mas);
+ new_left->parent = left->parent;
+ mt = mte_node_type(l_mas.node);
+ slots = ma_slots(new_left, mt);
+ pivs = ma_pivots(new_left, mt);
+ memcpy(slots, l_slots, sizeof(void *) * split);
+ memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
+ ma_set_meta(new_left, mt, 0, split);
+ l_mas.node = mt_mk_node(new_left, mt);
+
+ /* replace parent. */
+ offset = mte_parent_slot(mas->node);
+ mt = mas_parent_enum(&l_mas, l_mas.node);
+ parent = mas_pop_node(mas);
+ slots = ma_slots(parent, mt);
+ pivs = ma_pivots(parent, mt);
+ memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
+ rcu_assign_pointer(slots[offset], mas->node);
+ rcu_assign_pointer(slots[offset - 1], l_mas.node);
+ pivs[offset - 1] = l_mas.max;
+ eparent = mt_mk_node(parent, mt);
+done:
+ gap = mas_leaf_max_gap(mas);
+ mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
+ gap = mas_leaf_max_gap(&l_mas);
+ mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
+ mas_ascend(mas);
+
+ if (in_rcu)
+ mas_replace(mas, false);
+
+ mas_update_gap(mas);
+}
+
+/*
+ * mas_split_final_node() - Split the final node in a subtree operation.
+ * @mast: the maple subtree state
+ * @mas: The maple state
+ * @height: The height of the tree in case it's a new root.
+ */
+static inline bool mas_split_final_node(struct maple_subtree_state *mast,
+ struct ma_state *mas, int height)
+{
+ struct maple_enode *ancestor;
+
+ if (mte_is_root(mas->node)) {
+ if (mt_is_alloc(mas->tree))
+ mast->bn->type = maple_arange_64;
+ else
+ mast->bn->type = maple_range_64;
+ mas->depth = height;
+ }
+ /*
+ * Only a single node is used here; it could be the root.
+ * The big node data should just fit in a single node.
+ */
+ ancestor = mas_new_ma_node(mas, mast->bn);
+ mte_set_parent(mast->l->node, ancestor, mast->l->offset);
+ mte_set_parent(mast->r->node, ancestor, mast->r->offset);
+ mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
+
+ mast->l->node = ancestor;
+ mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
+ mas->offset = mast->bn->b_end - 1;
+ return true;
+}
+
+/*
+ * mast_fill_bnode() - Copy data into the big node in the subtree state
+ * @mast: The maple subtree state
+ * @mas: the maple state
+ * @skip: The number of entries to skip when inserting the new nodes.
+ */
+static inline void mast_fill_bnode(struct maple_subtree_state *mast,
+ struct ma_state *mas,
+ unsigned char skip)
+{
+ bool cp = true;
+ struct maple_enode *old = mas->node;
+ unsigned char split;
+
+ memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
+ memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
+ memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
+ mast->bn->b_end = 0;
+
+ if (mte_is_root(mas->node)) {
+ cp = false;
+ } else {
+ mas_ascend(mas);
+ mat_add(mast->free, old);
+ mas->offset = mte_parent_slot(mas->node);
+ }
+
+ if (cp && mast->l->offset)
+ mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
+
+ split = mast->bn->b_end;
+ mab_set_b_end(mast->bn, mast->l, mast->l->node);
+ mast->r->offset = mast->bn->b_end;
+ mab_set_b_end(mast->bn, mast->r, mast->r->node);
+ if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
+ cp = false;
+
+ if (cp)
+ mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
+ mast->bn, mast->bn->b_end);
+
+ mast->bn->b_end--;
+ mast->bn->type = mte_node_type(mas->node);
+}
+
+/*
+ * mast_split_data() - Split the data in the subtree state big node into regular
+ * nodes.
+ * @mast: The maple subtree state
+ * @mas: The maple state
+ * @split: The location to split the big node
+ */
+static inline void mast_split_data(struct maple_subtree_state *mast,
+ struct ma_state *mas, unsigned char split)
+{
+ unsigned char p_slot;
+
+ mab_mas_cp(mast->bn, 0, split, mast->l, true);
+ mte_set_pivot(mast->r->node, 0, mast->r->max);
+ mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
+ mast->l->offset = mte_parent_slot(mas->node);
+ mast->l->max = mast->bn->pivot[split];
+ mast->r->min = mast->l->max + 1;
+ if (mte_is_leaf(mas->node))
+ return;
+
+ p_slot = mast->orig_l->offset;
+ mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
+ &p_slot, split);
+ mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
+ &p_slot, split);
+}
+
+/*
+ * mas_push_data() - Instead of splitting a node, it is beneficial to push the
+ * data to the right or left node if there is room.
+ * @mas: The maple state
+ * @height: The current height of the maple state
+ * @mast: The maple subtree state
+ * @left: Push left or not.
+ *
+ * Keeping the height of the tree low means faster lookups.
+ *
+ * Return: True if pushed, false otherwise.
+ */
+static inline bool mas_push_data(struct ma_state *mas, int height,
+ struct maple_subtree_state *mast, bool left)
+{
+ unsigned char slot_total = mast->bn->b_end;
+ unsigned char end, space, split;
+
+ MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
+ tmp_mas = *mas;
+ tmp_mas.depth = mast->l->depth;
+
+ if (left && !mas_prev_sibling(&tmp_mas))
+ return false;
+ else if (!left && !mas_next_sibling(&tmp_mas))
+ return false;
+
+ end = mas_data_end(&tmp_mas);
+ slot_total += end;
+ space = 2 * mt_slot_count(mas->node) - 2;
+ /* -2 instead of -1 to ensure there isn't a triple split */
+ if (ma_is_leaf(mast->bn->type))
+ space--;
+
+ if (mas->max == ULONG_MAX)
+ space--;
+
+ if (slot_total >= space)
+ return false;
+
+ /* Get the data; Fill mast->bn */
+ mast->bn->b_end++;
+ if (left) {
+ mab_shift_right(mast->bn, end + 1);
+ mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
+ mast->bn->b_end = slot_total + 1;
+ } else {
+ mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
+ }
+
+ /* Configure mast for splitting of mast->bn */
+ split = mt_slots[mast->bn->type] - 2;
+ if (left) {
+ /* Switch mas to prev node */
+ mat_add(mast->free, mas->node);
+ *mas = tmp_mas;
+ /* Start using mast->l for the left side. */
+ tmp_mas.node = mast->l->node;
+ *mast->l = tmp_mas;
+ } else {
+ mat_add(mast->free, tmp_mas.node);
+ tmp_mas.node = mast->r->node;
+ *mast->r = tmp_mas;
+ split = slot_total - split;
+ }
+ split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
+ /* Update parent slot for split calculation. */
+ if (left)
+ mast->orig_l->offset += end + 1;
+
+ mast_split_data(mast, mas, split);
+ mast_fill_bnode(mast, mas, 2);
+ mas_split_final_node(mast, mas, height + 1);
+ return true;
+}
+
+/*
+ * mas_split() - Split data that is too big for one node into two.
+ * @mas: The maple state
+ * @b_node: The maple big node
+ * Return: 1 on success, 0 on failure.
+ */
+static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
+{
+
+ struct maple_subtree_state mast;
+ int height = 0;
+ unsigned char mid_split, split = 0;
+
+ /*
+ * Splitting is handled differently from any other B-tree; the Maple
+ * Tree splits upwards. Splitting up means that the split operation
+ * occurs when the walk of the tree hits the leaves and not on the way
+ * down. The reason for splitting up is that it is impossible to know
+ * how much space will be needed until the leaf is (or leaves are)
+ * reached. Since overwriting data is allowed and a range could
+ * overwrite more than one range or result in changing one entry into 3
+ * entries, it is impossible to know if a split is required until the
+ * data is examined.
+ *
+ * Splitting is a balancing act between keeping allocations to a minimum
+ * and avoiding a 'jitter' event where a tree is expanded to make room
+ * for an entry followed by a contraction when the entry is removed. To
+ * accomplish the balance, there are empty slots remaining in both left
+ * and right nodes after a split.
+ */
+ MA_STATE(l_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(r_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
+ MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
+ MA_TOPIARY(mat, mas->tree);
+
+ trace_ma_op(__func__, mas);
+ mas->depth = mas_mt_height(mas);
+ /* Allocation failures will happen early. */
+ mas_node_count(mas, 1 + mas->depth * 2);
+ if (mas_is_err(mas))
+ return 0;
+
+ mast.l = &l_mas;
+ mast.r = &r_mas;
+ mast.orig_l = &prev_l_mas;
+ mast.orig_r = &prev_r_mas;
+ mast.free = &mat;
+ mast.bn = b_node;
+
+ while (height++ <= mas->depth) {
+ if (mt_slots[b_node->type] > b_node->b_end) {
+ mas_split_final_node(&mast, mas, height);
+ break;
+ }
+
+ l_mas = r_mas = *mas;
+ l_mas.node = mas_new_ma_node(mas, b_node);
+ r_mas.node = mas_new_ma_node(mas, b_node);
+ /*
+ * Another way that 'jitter' is avoided is to terminate a split up early if the
+ * left or right node has space to spare. This is referred to as "pushing left"
+ * or "pushing right" and is similar to the B* tree, except the nodes left or
+ * right can rarely be reused due to RCU, but the ripple upwards is halted which
+ * is a significant savings.
+ */
+ /* Try to push left. */
+ if (mas_push_data(mas, height, &mast, true))
+ break;
+
+ /* Try to push right. */
+ if (mas_push_data(mas, height, &mast, false))
+ break;
+
+ split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
+ mast_split_data(&mast, mas, split);
+ /*
+ * Usually correct, mab_mas_cp in the above call overwrites
+ * r->max.
+ */
+ mast.r->max = mas->max;
+ mast_fill_bnode(&mast, mas, 1);
+ prev_l_mas = *mast.l;
+ prev_r_mas = *mast.r;
+ }
+
+ /* Set the original node as dead */
+ mat_add(mast.free, mas->node);
+ mas->node = l_mas.node;
+ mas_wmb_replace(mas, mast.free, NULL);
+ mtree_range_walk(mas);
+ return 1;
+}
+
+/*
+ * mas_reuse_node() - Reuse the node to store the data.
+ * @wr_mas: The maple write state
+ * @bn: The maple big node
+ * @end: The end of the data.
+ *
+ * Will always return false in RCU mode.
+ *
+ * Return: True if node was reused, false otherwise.
+ */
+static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
+ struct maple_big_node *bn, unsigned char end)
+{
+ /* Need to be rcu safe. */
+ if (mt_in_rcu(wr_mas->mas->tree))
+ return false;
+
+ if (end > bn->b_end) {
+ int clear = mt_slots[wr_mas->type] - bn->b_end;
+
+ memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
+ memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
+ }
+ mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
+ return true;
+}
+
+/*
+ * mas_commit_b_node() - Commit the big node into the tree.
+ * @wr_mas: The maple write state
+ * @b_node: The maple big node
+ * @end: The end of the data.
+ */
+static inline int mas_commit_b_node(struct ma_wr_state *wr_mas,
+ struct maple_big_node *b_node, unsigned char end)
+{
+ struct maple_node *node;
+ unsigned char b_end = b_node->b_end;
+ enum maple_type b_type = b_node->type;
+
+ if ((b_end < mt_min_slots[b_type]) &&
+ (!mte_is_root(wr_mas->mas->node)) &&
+ (mas_mt_height(wr_mas->mas) > 1))
+ return mas_rebalance(wr_mas->mas, b_node);
+
+ if (b_end >= mt_slots[b_type])
+ return mas_split(wr_mas->mas, b_node);
+
+ if (mas_reuse_node(wr_mas, b_node, end))
+ goto reuse_node;
+
+ mas_node_count(wr_mas->mas, 1);
+ if (mas_is_err(wr_mas->mas))
+ return 0;
+
+ node = mas_pop_node(wr_mas->mas);
+ node->parent = mas_mn(wr_mas->mas)->parent;
+ wr_mas->mas->node = mt_mk_node(node, b_type);
+ mab_mas_cp(b_node, 0, b_end, wr_mas->mas, true);
+
+ mas_replace(wr_mas->mas, false);
+reuse_node:
+ mas_update_gap(wr_mas->mas);
+ return 1;
+}
+
+/*
+ * mas_root_expand() - Expand a root to a node
+ * @mas: The maple state
+ * @entry: The entry to store into the tree
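+ *
+ * For example, expanding an empty root to store @entry over the range 5-10
+ * produces a leaf with slot 0 = %NULL (pivot 4) and slot 1 = @entry
+ * (pivot 10).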
+ */
+static inline int mas_root_expand(struct ma_state *mas, void *entry)
+{
+ void *contents = mas_root_locked(mas);
+ enum maple_type type = maple_leaf_64;
+ struct maple_node *node;
+ void __rcu **slots;
+ unsigned long *pivots;
+ int slot = 0;
+
+ mas_node_count(mas, 1);
+ if (unlikely(mas_is_err(mas)))
+ return 0;
+
+ node = mas_pop_node(mas);
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+ node->parent = ma_parent_ptr(
+ ((unsigned long)mas->tree | MA_ROOT_PARENT));
+ mas->node = mt_mk_node(node, type);
+
+ if (mas->index) {
+ if (contents) {
+ rcu_assign_pointer(slots[slot], contents);
+ if (likely(mas->index > 1))
+ slot++;
+ }
+ pivots[slot++] = mas->index - 1;
+ }
+
+ rcu_assign_pointer(slots[slot], entry);
+ mas->offset = slot;
+ pivots[slot] = mas->last;
+ if (mas->last != ULONG_MAX)
+ slot++;
+ mas->depth = 1;
+ mas_set_height(mas);
+
+ /* swap the new root into the tree */
+ rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+ ma_set_meta(node, maple_leaf_64, 0, slot);
+ return slot;
+}
+
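+/*
+ * mas_store_root() - Store an entry into a tree that is in the root or empty
+ * state.
+ * @mas: The maple state
+ * @entry: The entry to store
+ *
+ * A plain entry covering only index 0 is stored directly in ma_root;
+ * everything else expands the root into a node first.
+ */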
+static inline void mas_store_root(struct ma_state *mas, void *entry)
+{
+ if (likely((mas->last != 0) || (mas->index != 0)))
+ mas_root_expand(mas, entry);
+ else if (((unsigned long) (entry) & 3) == 2)
+ mas_root_expand(mas, entry);
+ else {
+ rcu_assign_pointer(mas->tree->ma_root, entry);
+ mas->node = MAS_START;
+ }
+}
+
+/*
+ * mas_is_span_wr() - Check if the write needs to be treated as a write that
+ * spans the node.
+ * @mas: The maple state
+ * @piv: The pivot value being written
+ * @type: The maple node type
+ * @entry: The data to write
+ *
+ * Spanning writes are writes that start in one node and end in another OR if
+ * the write of a %NULL will cause the node to end with a %NULL.
+ *
+ * Return: True if this is a spanning write, false otherwise.
+ */
+static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
+{
+ unsigned long max;
+ unsigned long last = wr_mas->mas->last;
+ unsigned long piv = wr_mas->r_max;
+ enum maple_type type = wr_mas->type;
+ void *entry = wr_mas->entry;
+
+ /* Contained in this pivot */
+ if (piv > last)
+ return false;
+
+ max = wr_mas->mas->max;
+ if (unlikely(ma_is_leaf(type))) {
+ /* Fits in the node, but may span slots. */
+ if (last < max)
+ return false;
+
+ /* Writes to the end of the node but not null. */
+ if ((last == max) && entry)
+ return false;
+
+ /*
+ * Writing ULONG_MAX is not a spanning write regardless of the
+ * value being written as long as the range fits in the node.
+ */
+ if ((last == ULONG_MAX) && (last == max))
+ return false;
+ } else if (piv == last) {
+ if (entry)
+ return false;
+
+ /* Detect spanning store wr walk */
+ if (last == ULONG_MAX)
+ return false;
+ }
+
+ trace_ma_write(__func__, wr_mas->mas, piv, entry);
+
+ return true;
+}
+
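+/*
+ * mas_wr_walk_descend() - Account for one level of a write walk: record the
+ * node type, find the offset for mas->index and cache the slot array in
+ * @wr_mas.
+ */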
+static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
+{
+ wr_mas->mas->depth++;
+ wr_mas->type = mte_node_type(wr_mas->mas->node);
+ mas_wr_node_walk(wr_mas);
+ wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
+}
+
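+/*
+ * mas_wr_walk_traverse() - Step into the child found by the walk: narrow the
+ * maple state to the slot range and descend into the entry that was read.
+ */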
+static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
+{
+ wr_mas->mas->max = wr_mas->r_max;
+ wr_mas->mas->min = wr_mas->r_min;
+ wr_mas->mas->node = wr_mas->content;
+ wr_mas->mas->offset = 0;
+}
+/*
+ * mas_wr_walk() - Walk the tree for a write.
+ * @wr_mas: The maple write state
+ *
+ * Uses mas_slot_locked() and does not need to worry about dead nodes.
+ *
+ * Return: True if it's contained in a node, false on spanning write.
+ */
+static bool mas_wr_walk(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ while (true) {
+ mas_wr_walk_descend(wr_mas);
+ if (unlikely(mas_is_span_wr(wr_mas)))
+ return false;
+
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ mas->offset);
+ if (ma_is_leaf(wr_mas->type))
+ return true;
+
+ mas_wr_walk_traverse(wr_mas);
+ }
+
+ return true;
+}
+
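+/*
+ * mas_wr_walk_index() - Walk the tree for a write without stopping on a
+ * spanning write; used to set up each side of a spanning store.
+ * @wr_mas: The maple write state
+ *
+ * Return: Always true (terminates at a leaf).
+ */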
+static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ while (true) {
+ mas_wr_walk_descend(wr_mas);
+ wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
+ mas->offset);
+ if (ma_is_leaf(wr_mas->type))
+ return true;
+ mas_wr_walk_traverse(wr_mas);
+
+ }
+ return true;
+}
+/*
+ * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
+ * @l_wr_mas: The left maple write state
+ * @r_wr_mas: The right maple write state
+ */
+static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
+ struct ma_wr_state *r_wr_mas)
+{
+ struct ma_state *r_mas = r_wr_mas->mas;
+ struct ma_state *l_mas = l_wr_mas->mas;
+ unsigned char l_slot;
+
+ l_slot = l_mas->offset;
+ if (!l_wr_mas->content)
+ l_mas->index = l_wr_mas->r_min;
+
+ if ((l_mas->index == l_wr_mas->r_min) &&
+ (l_slot &&
+ !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
+ if (l_slot > 1)
+ l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
+ else
+ l_mas->index = l_mas->min;
+
+ l_mas->offset = l_slot - 1;
+ }
+
+ if (!r_wr_mas->content) {
+ if (r_mas->last < r_wr_mas->r_max)
+ r_mas->last = r_wr_mas->r_max;
+ r_mas->offset++;
+ } else if ((r_mas->last == r_wr_mas->r_max) &&
+ (r_mas->last < r_mas->max) &&
+ !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
+ r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
+ r_wr_mas->type, r_mas->offset + 1);
+ r_mas->offset++;
+ }
+}
+
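+/*
+ * mas_state_walk() - Walk the tree for @mas->index, handling the empty tree
+ * and single-pointer root cases via mas_start().
+ * @mas: The maple state
+ *
+ * Return: The entry at mas->index or %NULL.
+ */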
+static inline void *mas_state_walk(struct ma_state *mas)
+{
+ void *entry;
+
+ entry = mas_start(mas);
+ if (mas_is_none(mas))
+ return NULL;
+
+ if (mas_is_ptr(mas))
+ return entry;
+
+ return mtree_range_walk(mas);
+}
+
+/*
+ * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
+ * to date.
+ *
+ * @mas: The maple state.
+ *
+ * Note: Leaves @mas in an undesirable state.
+ * Return: The entry for @mas->index or %NULL on dead node.
+ */
+static inline void *mtree_lookup_walk(struct ma_state *mas)
+{
+ unsigned long *pivots;
+ unsigned char offset;
+ struct maple_node *node;
+ struct maple_enode *next;
+ enum maple_type type;
+ void __rcu **slots;
+ unsigned char end;
+ unsigned long max;
+
+ next = mas->node;
+ max = ULONG_MAX;
+ do {
+ offset = 0;
+ node = mte_to_node(next);
+ type = mte_node_type(next);
+ pivots = ma_pivots(node, type);
+ end = ma_data_end(node, type, pivots, max);
+ if (unlikely(ma_dead_node(node)))
+ goto dead_node;
+
+ if (pivots[offset] >= mas->index)
+ goto next;
+
+ do {
+ offset++;
+ } while ((offset < end) && (pivots[offset] < mas->index));
+
+ if (likely(offset > end))
+ max = pivots[offset];
+
+next:
+ slots = ma_slots(node, type);
+ next = mt_slot(mas->tree, slots, offset);
+ if (unlikely(ma_dead_node(node)))
+ goto dead_node;
+ } while (!ma_is_leaf(type));
+
+ return (void *) next;
+
+dead_node:
+ mas_reset(mas);
+ return NULL;
+}
+
+/*
+ * mas_new_root() - Create a new root node that only contains the entry passed
+ * in.
+ * @mas: The maple state
+ * @entry: The entry to store.
+ *
+ * Only valid when mas->index == 0 and mas->last == ULONG_MAX.
+ *
+ * Return: 0 on error, 1 on success.
+ */
+static inline int mas_new_root(struct ma_state *mas, void *entry)
+{
+ struct maple_enode *root = mas_root_locked(mas);
+ enum maple_type type = maple_leaf_64;
+ struct maple_node *node;
+ void __rcu **slots;
+ unsigned long *pivots;
+
+ if (!entry && !mas->index && mas->last == ULONG_MAX) {
+ mas->depth = 0;
+ mas_set_height(mas);
+ rcu_assign_pointer(mas->tree->ma_root, entry);
+ mas->node = MAS_START;
+ goto done;
+ }
+
+ mas_node_count(mas, 1);
+ if (mas_is_err(mas))
+ return 0;
+
+ node = mas_pop_node(mas);
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+ node->parent = ma_parent_ptr(
+ ((unsigned long)mas->tree | MA_ROOT_PARENT));
+ mas->node = mt_mk_node(node, type);
+ rcu_assign_pointer(slots[0], entry);
+ pivots[0] = mas->last;
+ mas->depth = 1;
+ mas_set_height(mas);
+ rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
+
+done:
+ if (xa_is_node(root))
+ mte_destroy_walk(root, mas->tree);
+
+ return 1;
+}
+/*
+ * mas_wr_spanning_store() - Create a subtree with the store operation completed
+ * and new nodes where necessary, then place the sub-tree in the actual tree.
+ * Note that mas is expected to point to the node which caused the store to
+ * span.
+ * @wr_mas: The maple write state
+ *
+ * Return: 0 on error, positive on success.
+ */
+static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
+{
+ struct maple_subtree_state mast;
+ struct maple_big_node b_node;
+ struct ma_state *mas;
+ unsigned char height;
+
+ /* Left and Right side of spanning store */
+ MA_STATE(l_mas, NULL, 0, 0);
+ MA_STATE(r_mas, NULL, 0, 0);
+
+ MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
+ MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
+
+ /*
+ * A store operation that spans multiple nodes is called a spanning
+ * store and is handled early in the store call stack by the function
+ * mas_is_span_wr(). When a spanning store is identified, the maple
+ * state is duplicated. The first maple state walks the left tree path
+ * to ``index``, the duplicate walks the right tree path to ``last``.
+ * The data in the two nodes are combined into a single node, two nodes,
+ * or possibly three nodes (see the 3-way split above). A ``NULL``
+ * written to the last entry of a node is considered a spanning store as
+ * a rebalance is required for the operation to complete and an overflow
+ * of data may happen.
+ */
+ mas = wr_mas->mas;
+ trace_ma_op(__func__, mas);
+
+ if (unlikely(!mas->index && mas->last == ULONG_MAX))
+ return mas_new_root(mas, wr_mas->entry);
+ /*
+ * Node rebalancing may occur due to this store, so there may be three new
+ * entries per level plus a new root.
+ */
+ height = mas_mt_height(mas);
+ mas_node_count(mas, 1 + height * 3);
+ if (mas_is_err(mas))
+ return 0;
+
+ /*
+ * Set up right side. Need to get to the next offset after the spanning
+ * store to ensure it's not NULL and to combine both the next node and
+ * the node with the start together.
+ */
+ r_mas = *mas;
+ /* Avoid overflow, walk to next slot in the tree. */
+ if (r_mas.last + 1)
+ r_mas.last++;
+
+ r_mas.index = r_mas.last;
+ mas_wr_walk_index(&r_wr_mas);
+ r_mas.last = r_mas.index = mas->last;
+
+ /* Set up left side. */
+ l_mas = *mas;
+ mas_wr_walk_index(&l_wr_mas);
+
+ if (!wr_mas->entry) {
+ mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
+ mas->offset = l_mas.offset;
+ mas->index = l_mas.index;
+ mas->last = l_mas.last = r_mas.last;
+ }
+
+ /* expanding NULLs may make this cover the entire range */
+ if (!l_mas.index && r_mas.last == ULONG_MAX) {
+ mas_set_range(mas, 0, ULONG_MAX);
+ return mas_new_root(mas, wr_mas->entry);
+ }
+
+ memset(&b_node, 0, sizeof(struct maple_big_node));
+ /* Copy l_mas and store the value in b_node. */
+ mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
+ /* Copy r_mas into b_node. */
+ if (r_mas.offset <= r_wr_mas.node_end)
+ mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
+ &b_node, b_node.b_end + 1);
+ else
+ b_node.b_end++;
+
+ /* Stop spanning searches by searching for just index. */
+ l_mas.index = l_mas.last = mas->index;
+
+ mast.bn = &b_node;
+ mast.orig_l = &l_mas;
+ mast.orig_r = &r_mas;
+ /* Combine l_mas and r_mas and split them up evenly again. */
+ return mas_spanning_rebalance(mas, &mast, height + 1);
+}
+
+/*
+ * mas_wr_node_store() - Attempt to store the value in a node
+ * @wr_mas: The maple write state
+ *
+ * Attempts to reuse the node, but may allocate.
+ *
+ * Return: True if stored, false otherwise
+ */
+static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ void __rcu **dst_slots;
+ unsigned long *dst_pivots;
+ unsigned char dst_offset;
+ unsigned char new_end = wr_mas->node_end;
+ unsigned char offset;
+ unsigned char node_slots = mt_slots[wr_mas->type];
+ struct maple_node reuse, *newnode;
+ unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
+ bool in_rcu = mt_in_rcu(mas->tree);
+
+ offset = mas->offset;
+ if (mas->last == wr_mas->r_max) {
+ /* runs right to the end of the node */
+ if (mas->last == mas->max)
+ new_end = offset;
+ /* don't copy this offset */
+ wr_mas->offset_end++;
+ } else if (mas->last < wr_mas->r_max) {
+ /* new range ends in this range */
+ if (unlikely(wr_mas->r_max == ULONG_MAX))
+ mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
+
+ new_end++;
+ } else {
+ if (wr_mas->end_piv == mas->last)
+ wr_mas->offset_end++;
+
+ new_end -= wr_mas->offset_end - offset - 1;
+ }
+
+ /* new range starts within a range */
+ if (wr_mas->r_min < mas->index)
+ new_end++;
+
+ /* Not enough room */
+ if (new_end >= node_slots)
+ return false;
+
+ /* Not enough data. */
+ if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
+ !(mas->mas_flags & MA_STATE_BULK))
+ return false;
+
+ /* set up node. */
+ if (in_rcu) {
+ mas_node_count(mas, 1);
+ if (mas_is_err(mas))
+ return false;
+
+ newnode = mas_pop_node(mas);
+ } else {
+ memset(&reuse, 0, sizeof(struct maple_node));
+ newnode = &reuse;
+ }
+
+ newnode->parent = mas_mn(mas)->parent;
+ dst_pivots = ma_pivots(newnode, wr_mas->type);
+ dst_slots = ma_slots(newnode, wr_mas->type);
+ /* Copy from start to insert point */
+ memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
+ memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
+ dst_offset = offset;
+
+ /* Handle insert of new range starting after old range */
+ if (wr_mas->r_min < mas->index) {
+ mas->offset++;
+ rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
+ dst_pivots[dst_offset++] = mas->index - 1;
+ }
+
+ /* Store the new entry and range end. */
+ if (dst_offset < max_piv)
+ dst_pivots[dst_offset] = mas->last;
+ mas->offset = dst_offset;
+ rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
+
+ /*
+ * this range wrote to the end of the node or it overwrote the rest of
+ * the data
+ */
+ if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
+ new_end = dst_offset;
+ goto done;
+ }
+
+ dst_offset++;
+ /* Copy to the end of node if necessary. */
+ copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
+ memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
+ sizeof(void *) * copy_size);
+ if (dst_offset < max_piv) {
+ if (copy_size > max_piv - dst_offset)
+ copy_size = max_piv - dst_offset;
+
+ memcpy(dst_pivots + dst_offset,
+ wr_mas->pivots + wr_mas->offset_end,
+ sizeof(unsigned long) * copy_size);
+ }
+
+ if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
+ dst_pivots[new_end] = mas->max;
+
+done:
+ mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
+ if (in_rcu) {
+ mas->node = mt_mk_node(newnode, wr_mas->type);
+ mas_replace(mas, false);
+ } else {
+ memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
+ }
+ trace_ma_write(__func__, mas, 0, wr_mas->entry);
+ mas_update_gap(mas);
+ return true;
+}
+
+/*
+ * mas_wr_slot_store() - Attempt to store a value in a slot.
+ * @wr_mas: the maple write state
+ *
+ * Return: True if stored, false otherwise
+ */
+static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+ unsigned long lmax; /* Logical max. */
+ unsigned char offset = mas->offset;
+
+ if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
+ (offset != wr_mas->node_end)))
+ return false;
+
+ if (offset == wr_mas->node_end - 1)
+ lmax = mas->max;
+ else
+ lmax = wr_mas->pivots[offset + 1];
+
+ /* going to overwrite too many slots. */
+ if (lmax < mas->last)
+ return false;
+
+ if (wr_mas->r_min == mas->index) {
+ /* overwriting two or more ranges with one. */
+ if (lmax == mas->last)
+ return false;
+
+ /* Overwriting all of offset and a portion of offset + 1. */
+ rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
+ wr_mas->pivots[offset] = mas->last;
+ goto done;
+ }
+
+ /* Doesn't end on the next range end. */
+ if (lmax != mas->last)
+ return false;
+
+ /* Overwriting a portion of offset and all of offset + 1 */
+ if ((offset + 1 < mt_pivots[wr_mas->type]) &&
+ (wr_mas->entry || wr_mas->pivots[offset + 1]))
+ wr_mas->pivots[offset + 1] = mas->last;
+
+ rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
+ wr_mas->pivots[offset] = mas->index - 1;
+ mas->offset++; /* Keep mas accurate. */
+
+done:
+ trace_ma_write(__func__, mas, 0, wr_mas->entry);
+ mas_update_gap(mas);
+ return true;
+}
+
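+/*
+ * mas_wr_end_piv() - Advance wr_mas->offset_end until wr_mas->end_piv covers
+ * mas->last, falling back to mas->max at the end of the node.
+ */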
+static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
+{
+ while ((wr_mas->mas->last > wr_mas->end_piv) &&
+ (wr_mas->offset_end < wr_mas->node_end))
+ wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];
+
+ if (wr_mas->mas->last > wr_mas->end_piv)
+ wr_mas->end_piv = wr_mas->mas->max;
+}
+
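+/*
+ * mas_wr_extend_null() - Extend a store of a %NULL to merge with neighbouring
+ * %NULL slots within the node.
+ */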
+static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end])
+ mas->last = wr_mas->end_piv;
+
+ /* Check next slot(s) if we are overwriting the end */
+ if ((mas->last == wr_mas->end_piv) &&
+ (wr_mas->node_end != wr_mas->offset_end) &&
+ !wr_mas->slots[wr_mas->offset_end + 1]) {
+ wr_mas->offset_end++;
+ if (wr_mas->offset_end == wr_mas->node_end)
+ mas->last = mas->max;
+ else
+ mas->last = wr_mas->pivots[wr_mas->offset_end];
+ wr_mas->end_piv = mas->last;
+ }
+
+ if (!wr_mas->content) {
+ /* If this one is null, the next and prev are not */
+ mas->index = wr_mas->r_min;
+ } else {
+ /* Check prev slot if we are overwriting the start */
+ if (mas->index == wr_mas->r_min && mas->offset &&
+ !wr_mas->slots[mas->offset - 1]) {
+ mas->offset--;
+ wr_mas->r_min = mas->index =
+ mas_safe_min(mas, wr_mas->pivots, mas->offset);
+ wr_mas->r_max = wr_mas->pivots[mas->offset];
+ }
+ }
+}
+
+static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
+{
+ unsigned char end = wr_mas->node_end;
+ unsigned char new_end = end + 1;
+ struct ma_state *mas = wr_mas->mas;
+ unsigned char node_pivots = mt_pivots[wr_mas->type];
+
+ if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
+ if (new_end < node_pivots)
+ wr_mas->pivots[new_end] = wr_mas->pivots[end];
+
+ if (new_end < node_pivots)
+ ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
+
+ rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
+ mas->offset = new_end;
+ wr_mas->pivots[end] = mas->index - 1;
+
+ return true;
+ }
+
+ if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
+ if (new_end < node_pivots)
+ wr_mas->pivots[new_end] = wr_mas->pivots[end];
+
+ rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
+ if (new_end < node_pivots)
+ ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
+
+ wr_mas->pivots[end] = mas->last;
+ rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * mas_wr_bnode() - Slow path for a modification.
+ * @wr_mas: The write maple state
+ *
+ * This is where splits and rebalances end up.
+ */
+static void mas_wr_bnode(struct ma_wr_state *wr_mas)
+{
+ struct maple_big_node b_node;
+
+ trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
+ memset(&b_node, 0, sizeof(struct maple_big_node));
+ mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
+ mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
+}
+
+static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
+{
+ unsigned char node_slots;
+ unsigned char node_size;
+ struct ma_state *mas = wr_mas->mas;
+
+ /* Direct replacement */
+ if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
+ rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
+ if (!!wr_mas->entry ^ !!wr_mas->content)
+ mas_update_gap(mas);
+ return;
+ }
+
+ /* Attempt to append */
+ node_slots = mt_slots[wr_mas->type];
+ node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
+ if (mas->max == ULONG_MAX)
+ node_size++;
+
+ /* slot and node store will not fit, go to the slow path */
+ if (unlikely(node_size >= node_slots))
+ goto slow_path;
+
+ if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
+ (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
+ if (!wr_mas->content || !wr_mas->entry)
+ mas_update_gap(mas);
+ return;
+ }
+
+ if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
+ return;
+ else if (mas_wr_node_store(wr_mas))
+ return;
+
+ if (mas_is_err(mas))
+ return;
+
+slow_path:
+ mas_wr_bnode(wr_mas);
+}
+
+/*
+ * mas_wr_store_entry() - Internal call to store a value
+ * @wr_mas: The maple write state
+ *
+ * Return: The contents that were stored at the index.
+ */
+static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
+{
+ struct ma_state *mas = wr_mas->mas;
+
+ wr_mas->content = mas_start(mas);
+ if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ mas_store_root(mas, wr_mas->entry);
+ return wr_mas->content;
+ }
+
+ if (unlikely(!mas_wr_walk(wr_mas))) {
+ mas_wr_spanning_store(wr_mas);
+ return wr_mas->content;
+ }
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ wr_mas->end_piv = wr_mas->r_max;
+ mas_wr_end_piv(wr_mas);
+
+ if (!wr_mas->entry)
+ mas_wr_extend_null(wr_mas);
+
+ /* New root for a single pointer */
+ if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
+ mas_new_root(mas, wr_mas->entry);
+ return wr_mas->content;
+ }
+
+ mas_wr_modify(wr_mas);
+ return wr_mas->content;
+}
+
+/**
+ * mas_insert() - Internal call to insert a value
+ * @mas: The maple state
+ * @entry: The entry to store
+ *
+ * Return: %NULL if the entry was inserted, otherwise the contents that
+ * already exist at the requested index. The maple state needs to be checked
+ * for error conditions.
+ */
+static inline void *mas_insert(struct ma_state *mas, void *entry)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ /*
+ * Inserting a new range inserts either 0, 1, or 2 pivots within the
+ * tree. If the insert fits exactly into an existing gap with a value
+ * of NULL, then the slot only needs to be written with the new value.
+ * If the range being inserted is adjacent to another range, then only a
+ * single pivot needs to be inserted (as well as writing the entry). If
+ * the new range is within a gap but does not touch any other ranges,
+ * then two pivots need to be inserted: the start - 1, and the end. As
+ * usual, the entry must be written. Most operations require a new node
+ * to be allocated and replace an existing node to ensure RCU safety,
+ * when in RCU mode. The exception to requiring a newly allocated node
+ * is when inserting at the end of a node (appending). When done
+ * carefully, appending can reuse the node in place.
+ */
+ wr_mas.content = mas_start(mas);
+ if (wr_mas.content)
+ goto exists;
+
+ if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ mas_store_root(mas, entry);
+ return NULL;
+ }
+
+ /* spanning writes always overwrite something */
+ if (!mas_wr_walk(&wr_mas))
+ goto exists;
+
+ /* At this point, we are at the leaf node that needs to be altered. */
+ wr_mas.offset_end = mas->offset;
+ wr_mas.end_piv = wr_mas.r_max;
+
+ if (wr_mas.content || (mas->last > wr_mas.r_max))
+ goto exists;
+
+ if (!entry)
+ return NULL;
+
+ mas_wr_modify(&wr_mas);
+ return wr_mas.content;
+
+exists:
+ mas_set_err(mas, -EEXIST);
+ return wr_mas.content;
+
+}
+
+/*
+ * mas_prev_node() - Find the previous non-null entry at the same level in
+ * the tree.
+ * @mas: The maple state
+ * @min: The lower limit to search
+ *
+ * The prev node value will be mas->node[mas->offset] or MAS_NONE.
+ * Return: 1 if the node is dead, 0 otherwise.
+ */
+static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
+{
+ enum maple_type mt;
+ int offset, level;
+ void __rcu **slots;
+ struct maple_node *node;
+ struct maple_enode *enode;
+ unsigned long *pivots;
+
+ if (mas_is_none(mas))
+ return 0;
+
+ level = 0;
+ do {
+ node = mas_mn(mas);
+ if (ma_is_root(node))
+ goto no_entry;
+
+ /* Walk up. */
+ if (unlikely(mas_ascend(mas)))
+ return 1;
+ offset = mas->offset;
+ level++;
+ } while (!offset);
+
+ offset--;
+ mt = mte_node_type(mas->node);
+ node = mas_mn(mas);
+ slots = ma_slots(node, mt);
+ pivots = ma_pivots(node, mt);
+ mas->max = pivots[offset];
+ if (offset)
+ mas->min = pivots[offset - 1] + 1;
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ if (mas->max < min)
+ goto no_entry_min;
+
+ while (level > 1) {
+ level--;
+ enode = mas_slot(mas, slots, offset);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mas->node = enode;
+ mt = mte_node_type(mas->node);
+ node = mas_mn(mas);
+ slots = ma_slots(node, mt);
+ pivots = ma_pivots(node, mt);
+ offset = ma_data_end(node, mt, pivots, mas->max);
+ if (offset)
+ mas->min = pivots[offset - 1] + 1;
+
+ if (offset < mt_pivots[mt])
+ mas->max = pivots[offset];
+
+ if (mas->max < min)
+ goto no_entry;
+ }
+
+ mas->node = mas_slot(mas, slots, offset);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mas->offset = mas_data_end(mas);
+ if (unlikely(mte_dead_node(mas->node)))
+ return 1;
+
+ return 0;
+
+no_entry_min:
+ mas->offset = offset;
+ if (offset)
+ mas->min = pivots[offset - 1] + 1;
+no_entry:
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mas->node = MAS_NONE;
+ return 0;
+}
+
+/*
+ * mas_next_node() - Get the next node at the same level in the tree.
+ * @mas: The maple state
+ * @node: The maple node
+ * @max: The maximum pivot value to check.
+ *
+ * The next value will be mas->node[mas->offset] or MAS_NONE.
+ * Return: 1 on dead node, 0 otherwise.
+ */
+static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
+ unsigned long max)
+{
+ unsigned long min, pivot;
+ unsigned long *pivots;
+ struct maple_enode *enode;
+ int level = 0;
+ unsigned char offset;
+ enum maple_type mt;
+ void __rcu **slots;
+
+ if (mas->max >= max)
+ goto no_entry;
+
+ level = 0;
+ do {
+ if (ma_is_root(node))
+ goto no_entry;
+
+ min = mas->max + 1;
+ if (min > max)
+ goto no_entry;
+
+ if (unlikely(mas_ascend(mas)))
+ return 1;
+
+ offset = mas->offset;
+ level++;
+ node = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ pivots = ma_pivots(node, mt);
+ } while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max)));
+
+ slots = ma_slots(node, mt);
+ pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
+ while (unlikely(level > 1)) {
+ /* Descend, if necessary */
+ enode = mas_slot(mas, slots, offset);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mas->node = enode;
+ level--;
+ node = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ slots = ma_slots(node, mt);
+ pivots = ma_pivots(node, mt);
+ offset = 0;
+ pivot = pivots[0];
+ }
+
+ enode = mas_slot(mas, slots, offset);
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mas->node = enode;
+ mas->min = min;
+ mas->max = pivot;
+ return 0;
+
+no_entry:
+ if (unlikely(ma_dead_node(node)))
+ return 1;
+
+ mas->node = MAS_NONE;
+ return 0;
+}
+
+/*
+ * mas_next_nentry() - Get the next node entry
+ * @mas: The maple state
+ * @node: The maple node
+ * @max: The maximum value to check
+ * @type: The maple node type
+ *
+ * Sets @mas->offset to the offset of the next node entry, @mas->last to the
+ * pivot of the entry.
+ *
+ * Return: The next entry, %NULL otherwise
+ */
+static inline void *mas_next_nentry(struct ma_state *mas,
+ struct maple_node *node, unsigned long max, enum maple_type type)
+{
+ unsigned char count;
+ unsigned long pivot;
+ unsigned long *pivots;
+ void __rcu **slots;
+ void *entry;
+
+ if (mas->last == mas->max) {
+ mas->index = mas->max;
+ return NULL;
+ }
+
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+ mas->index = mas_safe_min(mas, pivots, mas->offset);
+ if (ma_dead_node(node))
+ return NULL;
+
+ if (mas->index > max)
+ return NULL;
+
+ count = ma_data_end(node, type, pivots, mas->max);
+ if (mas->offset > count)
+ return NULL;
+
+ while (mas->offset < count) {
+ pivot = pivots[mas->offset];
+ entry = mas_slot(mas, slots, mas->offset);
+ if (ma_dead_node(node))
+ return NULL;
+
+ if (entry)
+ goto found;
+
+ if (pivot >= max)
+ return NULL;
+
+ mas->index = pivot + 1;
+ mas->offset++;
+ }
+
+ if (mas->index > mas->max) {
+ mas->index = mas->last;
+ return NULL;
+ }
+
+ pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
+ entry = mas_slot(mas, slots, mas->offset);
+ if (ma_dead_node(node))
+ return NULL;
+
+ if (!pivot)
+ return NULL;
+
+ if (!entry)
+ return NULL;
+
+found:
+ mas->last = pivot;
+ return entry;
+}
+
+static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
+{
+retry:
+ mas_set(mas, index);
+ mas_state_walk(mas);
+ if (mas_is_start(mas))
+ goto retry;
+}
+
+/*
+ * mas_next_entry() - Internal function to get the next entry.
+ * @mas: The maple state
+ * @limit: The maximum range start.
+ *
+ * Sets @mas->node to the next entry and @mas->index to the beginning value
+ * of the entry's range. Does not check beyond @limit.
+ * Sets @mas->index and @mas->last to the limit if it is hit.
+ * Restarts on dead nodes.
+ *
+ * Return: the next entry or %NULL.
+ */
+static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
+{
+ void *entry = NULL;
+ struct maple_enode *prev_node;
+ struct maple_node *node;
+ unsigned char offset;
+ unsigned long last;
+ enum maple_type mt;
+
+ last = mas->last;
+retry:
+ offset = mas->offset;
+ prev_node = mas->node;
+ node = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ mas->offset++;
+ if (unlikely(mas->offset >= mt_slots[mt])) {
+ mas->offset = mt_slots[mt] - 1;
+ goto next_node;
+ }
+
+ while (!mas_is_none(mas)) {
+ entry = mas_next_nentry(mas, node, limit, mt);
+ if (unlikely(ma_dead_node(node))) {
+ mas_rewalk(mas, last);
+ goto retry;
+ }
+
+ if (likely(entry))
+ return entry;
+
+ if (unlikely((mas->index > limit)))
+ break;
+
+next_node:
+ prev_node = mas->node;
+ offset = mas->offset;
+ if (unlikely(mas_next_node(mas, node, limit))) {
+ mas_rewalk(mas, last);
+ goto retry;
+ }
+ mas->offset = 0;
+ node = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ }
+
+ mas->index = mas->last = limit;
+ mas->offset = offset;
+ mas->node = prev_node;
+ return NULL;
+}
+
+/*
+ * mas_prev_nentry() - Get the previous node entry.
+ * @mas: The maple state.
+ * @limit: The lower limit to check for a value.
+ * @index: The index to restart the walk from if a dead node is encountered.
+ *
+ * Return: the entry, %NULL otherwise.
+ */
+static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
+ unsigned long index)
+{
+ unsigned long pivot, min;
+ unsigned char offset;
+ struct maple_node *mn;
+ enum maple_type mt;
+ unsigned long *pivots;
+ void __rcu **slots;
+ void *entry;
+
+retry:
+ if (!mas->offset)
+ return NULL;
+
+ mn = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ offset = mas->offset - 1;
+ if (offset >= mt_slots[mt])
+ offset = mt_slots[mt] - 1;
+
+ slots = ma_slots(mn, mt);
+ pivots = ma_pivots(mn, mt);
+ if (offset == mt_pivots[mt])
+ pivot = mas->max;
+ else
+ pivot = pivots[offset];
+
+ if (unlikely(ma_dead_node(mn))) {
+ mas_rewalk(mas, index);
+ goto retry;
+ }
+
+ while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) ||
+ !pivot))
+ pivot = pivots[--offset];
+
+ min = mas_safe_min(mas, pivots, offset);
+ entry = mas_slot(mas, slots, offset);
+ if (unlikely(ma_dead_node(mn))) {
+ mas_rewalk(mas, index);
+ goto retry;
+ }
+
+ if (likely(entry)) {
+ mas->offset = offset;
+ mas->last = pivot;
+ mas->index = min;
+ }
+ return entry;
+}
+
+static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
+{
+ void *entry;
+
+retry:
+ while (likely(!mas_is_none(mas))) {
+ entry = mas_prev_nentry(mas, min, mas->index);
+ if (unlikely(mas->last < min))
+ goto not_found;
+
+ if (likely(entry))
+ return entry;
+
+ if (unlikely(mas_prev_node(mas, min))) {
+ mas_rewalk(mas, mas->index);
+ goto retry;
+ }
+
+ mas->offset++;
+ }
+
+ mas->offset--;
+not_found:
+ mas->index = mas->last = min;
+ return NULL;
+}
+
+/*
+ * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the
+ * highest gap address of a given size in a given node and descend.
+ * @mas: The maple state
+ * @size: The needed size.
+ *
+ * Return: True if found in a leaf, false otherwise.
+ *
+ */
+static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
+{
+ enum maple_type type = mte_node_type(mas->node);
+ struct maple_node *node = mas_mn(mas);
+ unsigned long *pivots, *gaps;
+ void __rcu **slots;
+ unsigned long gap = 0;
+ unsigned long max, min, index;
+ unsigned char offset;
+
+ if (unlikely(mas_is_err(mas)))
+ return true;
+
+ if (ma_is_dense(type)) {
+ /* dense nodes. */
+ mas->offset = (unsigned char)(mas->index - mas->min);
+ return true;
+ }
+
+ pivots = ma_pivots(node, type);
+ slots = ma_slots(node, type);
+ gaps = ma_gaps(node, type);
+ offset = mas->offset;
+ min = mas_safe_min(mas, pivots, offset);
+ /* Skip out of bounds. */
+ while (mas->last < min)
+ min = mas_safe_min(mas, pivots, --offset);
+
+ max = mas_safe_pivot(mas, pivots, offset, type);
+ index = mas->index;
+ while (index <= max) {
+ gap = 0;
+ if (gaps)
+ gap = gaps[offset];
+ else if (!mas_slot(mas, slots, offset))
+ gap = max - min + 1;
+
+ if (gap) {
+ if ((size <= gap) && (size <= mas->last - min + 1))
+ break;
+
+ if (!gaps) {
+ /* Skip the next slot, it cannot be a gap. */
+ if (offset < 2)
+ goto ascend;
+
+ offset -= 2;
+ max = pivots[offset];
+ min = mas_safe_min(mas, pivots, offset);
+ continue;
+ }
+ }
+
+ if (!offset)
+ goto ascend;
+
+ offset--;
+ max = min - 1;
+ min = mas_safe_min(mas, pivots, offset);
+ }
+
+ if (unlikely(index > max)) {
+ mas_set_err(mas, -EBUSY);
+ return false;
+ }
+
+ if (unlikely(ma_is_leaf(type))) {
+ mas->offset = offset;
+ mas->min = min;
+ mas->max = min + gap - 1;
+ return true;
+ }
+
+ /* descend, only happens under lock. */
+ mas->node = mas_slot(mas, slots, offset);
+ mas->min = min;
+ mas->max = max;
+ mas->offset = mas_data_end(mas);
+ return false;
+
+ascend:
+ if (mte_is_root(mas->node))
+ mas_set_err(mas, -EBUSY);
+
+ return false;
+}
+
+static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
+{
+ enum maple_type type = mte_node_type(mas->node);
+ unsigned long pivot, min, gap = 0;
+ unsigned char count, offset;
+ unsigned long *gaps = NULL, *pivots = ma_pivots(mas_mn(mas), type);
+ void __rcu **slots = ma_slots(mas_mn(mas), type);
+ bool found = false;
+
+ if (ma_is_dense(type)) {
+ mas->offset = (unsigned char)(mas->index - mas->min);
+ return true;
+ }
+
+ gaps = ma_gaps(mte_to_node(mas->node), type);
+ offset = mas->offset;
+ count = mt_slots[type];
+ min = mas_safe_min(mas, pivots, offset);
+ for (; offset < count; offset++) {
+ pivot = mas_safe_pivot(mas, pivots, offset, type);
+ if (offset && !pivot)
+ break;
+
+ /* Not within lower bounds */
+ if (mas->index > pivot)
+ goto next_slot;
+
+ if (gaps)
+ gap = gaps[offset];
+ else if (!mas_slot(mas, slots, offset))
+ gap = min(pivot, mas->last) - max(mas->index, min) + 1;
+ else
+ goto next_slot;
+
+ if (gap >= size) {
+ if (ma_is_leaf(type)) {
+ found = true;
+ goto done;
+ }
+ if (mas->index <= pivot) {
+ mas->node = mas_slot(mas, slots, offset);
+ mas->min = min;
+ mas->max = pivot;
+ offset = 0;
+ type = mte_node_type(mas->node);
+ count = mt_slots[type];
+ break;
+ }
+ }
+next_slot:
+ min = pivot + 1;
+ if (mas->last <= pivot) {
+ mas_set_err(mas, -EBUSY);
+ return true;
+ }
+ }
+
+ if (mte_is_root(mas->node))
+ found = true;
+done:
+ mas->offset = offset;
+ return found;
+}
+
+/**
+ * mas_walk() - Search for @mas->index in the tree.
+ * @mas: The maple state.
+ *
+ * mas->index and mas->last will be set to the range if there is a value. If
+ * mas->node is MAS_NONE, reset to MAS_START.
+ *
+ * Return: the entry at the location or %NULL.
+ */
+void *mas_walk(struct ma_state *mas)
+{
+ void *entry;
+
+retry:
+ entry = mas_state_walk(mas);
+ if (mas_is_start(mas))
+ goto retry;
+
+ if (mas_is_ptr(mas)) {
+ if (!mas->index) {
+ mas->last = 0;
+ } else {
+ mas->index = 1;
+ mas->last = ULONG_MAX;
+ }
+ return entry;
+ }
+
+ if (mas_is_none(mas)) {
+ mas->index = 0;
+ mas->last = ULONG_MAX;
+ }
+
+ return entry;
+}
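+
+/*
+ * Example (an illustrative sketch only; "tree", "index" and "entry" are
+ * invented names): looking up the range containing an index with the
+ * advanced API under RCU.
+ *
+ *  MA_STATE(mas, &tree, index, index);
+ *
+ *  rcu_read_lock();
+ *  entry = mas_walk(&mas);
+ *  rcu_read_unlock();
+ *
+ * On success, mas.index and mas.last hold the bounds of the range that
+ * contains the returned entry.
+ */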
+
+static inline bool mas_rewind_node(struct ma_state *mas)
+{
+ unsigned char slot;
+
+ do {
+ if (mte_is_root(mas->node)) {
+ slot = mas->offset;
+ if (!slot)
+ return false;
+ } else {
+ mas_ascend(mas);
+ slot = mas->offset;
+ }
+ } while (!slot);
+
+ mas->offset = --slot;
+ return true;
+}
+
+/*
+ * mas_skip_node() - Internal function. Skip over a node.
+ * @mas: The maple state.
+ *
+ * Return: true if there is another node, false otherwise.
+ */
+static inline bool mas_skip_node(struct ma_state *mas)
+{
+ unsigned char slot, slot_count;
+ unsigned long *pivots;
+ enum maple_type mt;
+
+ mt = mte_node_type(mas->node);
+ slot_count = mt_slots[mt] - 1;
+ do {
+ if (mte_is_root(mas->node)) {
+ slot = mas->offset;
+ if (slot > slot_count) {
+ mas_set_err(mas, -EBUSY);
+ return false;
+ }
+ } else {
+ mas_ascend(mas);
+ slot = mas->offset;
+ mt = mte_node_type(mas->node);
+ slot_count = mt_slots[mt] - 1;
+ }
+ } while (slot > slot_count);
+
+ mas->offset = ++slot;
+ pivots = ma_pivots(mas_mn(mas), mt);
+ if (slot > 0)
+ mas->min = pivots[slot - 1] + 1;
+
+ if (slot <= slot_count)
+ mas->max = pivots[slot];
+
+ return true;
+}
+
+/*
+ * mas_awalk() - Allocation walk. Search from low address to high for a gap
+ * of @size.
+ * @mas: The maple state
+ * @size: The size of the gap required
+ *
+ * Search between @mas->index and @mas->last for a gap of @size.
+ */
+static inline void mas_awalk(struct ma_state *mas, unsigned long size)
+{
+ struct maple_enode *last = NULL;
+
+ /*
+ * There are 4 options:
+ * go to child (descend)
+ * go back to parent (ascend)
+ * no gap found. (return, slot == MAPLE_NODE_SLOTS)
+ * found the gap. (return, slot != MAPLE_NODE_SLOTS)
+ */
+ while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
+ if (last == mas->node)
+ mas_skip_node(mas);
+ else
+ last = mas->node;
+ }
+}
+
+/*
+ * mas_fill_gap() - Fill a located gap with @entry.
+ * @mas: The maple state
+ * @entry: The value to store
+ * @slot: The offset into the node to store the @entry
+ * @size: The size of the entry
+ * @index: The start location
+ */
+static inline void mas_fill_gap(struct ma_state *mas, void *entry,
+ unsigned char slot, unsigned long size, unsigned long *index)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+ unsigned char pslot = mte_parent_slot(mas->node);
+ struct maple_enode *mn = mas->node;
+ unsigned long *pivots;
+ enum maple_type ptype;
+ /*
+ * mas->index is the start address for the search
+ * which may no longer be needed.
+ * mas->last is the end address for the search
+ */
+
+ *index = mas->index;
+ mas->last = mas->index + size - 1;
+
+ /*
+ * It is possible that using mas->max and mas->min to correctly
+ * calculate the index and last will cause an issue in the gap
+ * calculation, so fix the ma_state here
+ */
+ mas_ascend(mas);
+ ptype = mte_node_type(mas->node);
+ pivots = ma_pivots(mas_mn(mas), ptype);
+ mas->max = mas_safe_pivot(mas, pivots, pslot, ptype);
+ mas->min = mas_safe_min(mas, pivots, pslot);
+ mas->node = mn;
+ mas->offset = slot;
+ mas_wr_store_entry(&wr_mas);
+}
+
+/*
+ * mas_sparse_area() - Internal function. Return upper or lower limit when
+ * searching for a gap in an empty tree.
+ * @mas: The maple state
+ * @min: the minimum range
+ * @max: The maximum range
+ * @size: The size of the gap
+ * @fwd: Searching forward or back
+ */
+static inline void mas_sparse_area(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size, bool fwd)
+{
+ unsigned long start = 0;
+
+ if (!unlikely(mas_is_none(mas)))
+ start++;
+ /* mas_is_ptr */
+
+ if (start < min)
+ start = min;
+
+ if (fwd) {
+ mas->index = start;
+ mas->last = start + size - 1;
+ return;
+ }
+
+ mas->index = max;
+}
+
+/*
+ * mas_empty_area() - Get the lowest address within the range that is
+ * sufficient for the size requested.
+ * @mas: The maple state
+ * @min: The lowest value of the range
+ * @max: The highest value of the range
+ * @size: The size needed
+ *
+ * Return: 0 on success, -EBUSY if no suitable gap is found.
+ */
+int mas_empty_area(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size)
+{
+ unsigned char offset;
+ unsigned long *pivots;
+ enum maple_type mt;
+
+ if (mas_is_start(mas))
+ mas_start(mas);
+ else if (mas->offset >= 2)
+ mas->offset -= 2;
+ else if (!mas_skip_node(mas))
+ return -EBUSY;
+
+ /* Empty set */
+ if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ mas_sparse_area(mas, min, max, size, true);
+ return 0;
+ }
+
+ /* The start of the window can only be within these values */
+ mas->index = min;
+ mas->last = max;
+ mas_awalk(mas, size);
+
+ if (unlikely(mas_is_err(mas)))
+ return xa_err(mas->node);
+
+ offset = mas->offset;
+ if (unlikely(offset == MAPLE_NODE_SLOTS))
+ return -EBUSY;
+
+ mt = mte_node_type(mas->node);
+ pivots = ma_pivots(mas_mn(mas), mt);
+ if (offset)
+ mas->min = pivots[offset - 1] + 1;
+
+ if (offset < mt_pivots[mt])
+ mas->max = pivots[offset];
+
+ if (mas->index < mas->min)
+ mas->index = mas->min;
+
+ mas->last = mas->index + size - 1;
+ return 0;
+}
+
+/*
+ * mas_empty_area_rev() - Get the highest address within the range that is
+ * sufficient for the size requested.
+ * @mas: The maple state
+ * @min: The lowest value of the range
+ * @max: The highest value of the range
+ * @size: The size needed
+ *
+ * Return: 0 on success, -EBUSY if no suitable gap is found.
+ */
+int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
+ unsigned long max, unsigned long size)
+{
+ struct maple_enode *last = mas->node;
+
+ if (mas_is_start(mas)) {
+ mas_start(mas);
+ mas->offset = mas_data_end(mas);
+ } else if (mas->offset >= 2) {
+ mas->offset -= 2;
+ } else if (!mas_rewind_node(mas)) {
+ return -EBUSY;
+ }
+
+ /* Empty set. */
+ if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ mas_sparse_area(mas, min, max, size, false);
+ return 0;
+ }
+
+ /* The start of the window can only be within these values. */
+ mas->index = min;
+ mas->last = max;
+
+ while (!mas_rev_awalk(mas, size)) {
+ if (last == mas->node) {
+ if (!mas_rewind_node(mas))
+ return -EBUSY;
+ } else {
+ last = mas->node;
+ }
+ }
+
+ if (mas_is_err(mas))
+ return xa_err(mas->node);
+
+ if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
+ return -EBUSY;
+
+ /*
+ * mas_rev_awalk() has set mas->min and mas->max to the gap values. If
+ * the maximum is outside the window we are searching, then use the last
+ * location in the search.
+ * mas->max and mas->min is the range of the gap.
+ * mas->index and mas->last are currently set to the search range.
+ */
+
+ /* Trim the upper limit to the max. */
+ if (mas->max <= mas->last)
+ mas->last = mas->max;
+
+ mas->index = mas->last - size + 1;
+ return 0;
+}
+
+static inline int mas_alloc(struct ma_state *mas, void *entry,
+ unsigned long size, unsigned long *index)
+{
+ unsigned long min;
+
+ mas_start(mas);
+ if (mas_is_none(mas) || mas_is_ptr(mas)) {
+ mas_root_expand(mas, entry);
+ if (mas_is_err(mas))
+ return xa_err(mas->node);
+
+ if (!mas->index)
+ return mte_pivot(mas->node, 0);
+ return mte_pivot(mas->node, 1);
+ }
+
+ /* Must be walking a tree. */
+ mas_awalk(mas, size);
+ if (mas_is_err(mas))
+ return xa_err(mas->node);
+
+ if (mas->offset == MAPLE_NODE_SLOTS)
+ goto no_gap;
+
+ /*
+ * At this point, mas->node points to the right node and we have an
+ * offset that has a sufficient gap.
+ */
+ min = mas->min;
+ if (mas->offset)
+ min = mte_pivot(mas->node, mas->offset - 1) + 1;
+
+ if (mas->index < min)
+ mas->index = min;
+
+ mas_fill_gap(mas, entry, mas->offset, size, index);
+ return 0;
+
+no_gap:
+ return -EBUSY;
+}
+
+static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
+ unsigned long max, void *entry,
+ unsigned long size, unsigned long *index)
+{
+ int ret = 0;
+
+ ret = mas_empty_area_rev(mas, min, max, size);
+ if (ret)
+ return ret;
+
+ if (mas_is_err(mas))
+ return xa_err(mas->node);
+
+ if (mas->offset == MAPLE_NODE_SLOTS)
+ goto no_gap;
+
+ mas_fill_gap(mas, entry, mas->offset, size, index);
+ return 0;
+
+no_gap:
+ return -EBUSY;
+}
+
+/*
+ * mas_dead_leaves() - Mark all leaves of a node as dead.
+ * @mas: The maple state
+ * @slots: Pointer to the slot array
+ *
+ * Must hold the write lock.
+ *
+ * Return: The number of leaves marked as dead.
+ */
+static inline
+unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
+{
+ struct maple_node *node;
+ enum maple_type type;
+ void *entry;
+ int offset;
+
+ for (offset = 0; offset < mt_slot_count(mas->node); offset++) {
+ entry = mas_slot_locked(mas, slots, offset);
+ type = mte_node_type(entry);
+ node = mte_to_node(entry);
+ /* Use both node and type to catch LE & BE metadata */
+ if (!node || !type)
+ break;
+
+ mte_set_node_dead(entry);
+ smp_wmb(); /* Needed for RCU */
+ node->type = type;
+ rcu_assign_pointer(slots[offset], node);
+ }
+
+ return offset;
+}
+
+static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset)
+{
+ struct maple_node *node, *next;
+ void __rcu **slots = NULL;
+
+ next = mas_mn(mas);
+ do {
+ mas->node = ma_enode_ptr(next);
+ node = mas_mn(mas);
+ slots = ma_slots(node, node->type);
+ next = mas_slot_locked(mas, slots, offset);
+ offset = 0;
+ } while (!ma_is_leaf(next->type));
+
+ return slots;
+}
+
+static void mt_free_walk(struct rcu_head *head)
+{
+ void __rcu **slots;
+ struct maple_node *node, *start;
+ struct maple_tree mt;
+ unsigned char offset;
+ enum maple_type type;
+ MA_STATE(mas, &mt, 0, 0);
+
+ node = container_of(head, struct maple_node, rcu);
+
+ if (ma_is_leaf(node->type))
+ goto free_leaf;
+
+ mt_init_flags(&mt, node->ma_flags);
+ mas_lock(&mas);
+ start = node;
+ mas.node = mt_mk_node(node, node->type);
+ slots = mas_dead_walk(&mas, 0);
+ node = mas_mn(&mas);
+ do {
+ mt_free_bulk(node->slot_len, slots);
+ offset = node->parent_slot + 1;
+ mas.node = node->piv_parent;
+ if (mas_mn(&mas) == node)
+ goto start_slots_free;
+
+ type = mte_node_type(mas.node);
+ slots = ma_slots(mte_to_node(mas.node), type);
+ if ((offset < mt_slots[type]) && (slots[offset]))
+ slots = mas_dead_walk(&mas, offset);
+
+ node = mas_mn(&mas);
+ } while ((node != start) || (node->slot_len < offset));
+
+ slots = ma_slots(node, node->type);
+ mt_free_bulk(node->slot_len, slots);
+
+start_slots_free:
+ mas_unlock(&mas);
+free_leaf:
+ mt_free_rcu(&node->rcu);
+}
+
+static inline void __rcu **mas_destroy_descend(struct ma_state *mas,
+ struct maple_enode *prev, unsigned char offset)
+{
+ struct maple_node *node;
+ struct maple_enode *next = mas->node;
+ void __rcu **slots = NULL;
+
+ do {
+ mas->node = next;
+ node = mas_mn(mas);
+ slots = ma_slots(node, mte_node_type(mas->node));
+ next = mas_slot_locked(mas, slots, 0);
+ if ((mte_dead_node(next)))
+ next = mas_slot_locked(mas, slots, 1);
+
+ mte_set_node_dead(mas->node);
+ node->type = mte_node_type(mas->node);
+ node->piv_parent = prev;
+ node->parent_slot = offset;
+ offset = 0;
+ prev = mas->node;
+ } while (!mte_is_leaf(next));
+
+ return slots;
+}
+
+static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
+ bool free)
+{
+ void __rcu **slots;
+ struct maple_node *node = mte_to_node(enode);
+ struct maple_enode *start;
+ struct maple_tree mt;
+
+ MA_STATE(mas, &mt, 0, 0);
+
+ if (mte_is_leaf(enode))
+ goto free_leaf;
+
+ mt_init_flags(&mt, ma_flags);
+ mas_lock(&mas);
+
+ mas.node = start = enode;
+ slots = mas_destroy_descend(&mas, start, 0);
+ node = mas_mn(&mas);
+ do {
+ enum maple_type type;
+ unsigned char offset;
+ struct maple_enode *parent, *tmp;
+
+ node->slot_len = mas_dead_leaves(&mas, slots);
+ if (free)
+ mt_free_bulk(node->slot_len, slots);
+ offset = node->parent_slot + 1;
+ mas.node = node->piv_parent;
+ if (mas_mn(&mas) == node)
+ goto start_slots_free;
+
+ type = mte_node_type(mas.node);
+ slots = ma_slots(mte_to_node(mas.node), type);
+ if (offset >= mt_slots[type])
+ goto next;
+
+ tmp = mas_slot_locked(&mas, slots, offset);
+ if (mte_node_type(tmp) && mte_to_node(tmp)) {
+ parent = mas.node;
+ mas.node = tmp;
+ slots = mas_destroy_descend(&mas, parent, offset);
+ }
+next:
+ node = mas_mn(&mas);
+ } while (start != mas.node);
+
+ node = mas_mn(&mas);
+ node->slot_len = mas_dead_leaves(&mas, slots);
+ if (free)
+ mt_free_bulk(node->slot_len, slots);
+
+start_slots_free:
+ mas_unlock(&mas);
+
+free_leaf:
+ if (free)
+ mt_free_rcu(&node->rcu);
+}
+
+/*
+ * mte_destroy_walk() - Free a tree or sub-tree.
+ * @enode: The encoded maple node (maple_enode) to start
+ * @mt: The maple tree to free - needed for node types.
+ *
+ * Must hold the write lock.
+ */
+static inline void mte_destroy_walk(struct maple_enode *enode,
+ struct maple_tree *mt)
+{
+ struct maple_node *node = mte_to_node(enode);
+
+ if (mt_in_rcu(mt)) {
+ mt_destroy_walk(enode, mt->ma_flags, false);
+ call_rcu(&node->rcu, mt_free_walk);
+ } else {
+ mt_destroy_walk(enode, mt->ma_flags, true);
+ }
+}
+
+static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
+{
+ if (!mas_is_start(wr_mas->mas)) {
+ if (mas_is_none(wr_mas->mas)) {
+ mas_reset(wr_mas->mas);
+ } else {
+ wr_mas->r_max = wr_mas->mas->max;
+ wr_mas->type = mte_node_type(wr_mas->mas->node);
+ if (mas_is_span_wr(wr_mas))
+ mas_reset(wr_mas->mas);
+ }
+ }
+
+}
+
+/* Interface */
+
+/**
+ * mas_store() - Store an @entry.
+ * @mas: The maple state.
+ * @entry: The entry to store.
+ *
+ * The @mas->index and @mas->last are used to set the range for the @entry.
+ * Note: The @mas should have pre-allocated entries to ensure there is memory to
+ * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
+ *
+ * Return: the first entry between mas->index and mas->last or %NULL.
+ */
+void *mas_store(struct ma_state *mas, void *entry)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ trace_ma_write(__func__, mas, 0, entry);
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+ if (mas->index > mas->last)
+ pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry);
+ MT_BUG_ON(mas->tree, mas->index > mas->last);
+ if (mas->index > mas->last) {
+ mas_set_err(mas, -EINVAL);
+ return NULL;
+ }
+
+#endif
+
+ /*
+ * Storing is the same operation as insert with the added caveat that it
+ * can overwrite entries. Although this seems simple enough, one may
+ * want to examine what happens if a single store operation were to
+ * overwrite multiple entries within a self-balancing B-Tree.
+ */
+ mas_wr_store_setup(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
+ return wr_mas.content;
+}
+
+/**
+ * mas_store_gfp() - Store a value into the tree.
+ * @mas: The maple state
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations if necessary.
+ *
+ * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
+ * be allocated.
+ */
+int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ mas_wr_store_setup(&wr_mas);
+ trace_ma_write(__func__, mas, 0, entry);
+retry:
+ mas_wr_store_entry(&wr_mas);
+ if (unlikely(mas_nomem(mas, gfp)))
+ goto retry;
+
+ if (unlikely(mas_is_err(mas)))
+ return xa_err(mas->node);
+
+ return 0;
+}
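+
+/*
+ * Example (an illustrative sketch only; "tree" and "entry" are invented
+ * names): store an entry over the range 5-15 with the advanced API. The
+ * maple state carries the range and the store is done under the tree lock.
+ *
+ *  MA_STATE(mas, &tree, 5, 15);
+ *
+ *  mas_lock(&mas);
+ *  if (mas_store_gfp(&mas, entry, GFP_KERNEL))
+ *    ... handle -EINVAL / -ENOMEM ...
+ *  mas_unlock(&mas);
+ */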
+
+/**
+ * mas_store_prealloc() - Store a value into the tree using memory
+ * preallocated in the maple state.
+ * @mas: The maple state
+ * @entry: The entry to store.
+ */
+void mas_store_prealloc(struct ma_state *mas, void *entry)
+{
+ MA_WR_STATE(wr_mas, mas, entry);
+
+ mas_wr_store_setup(&wr_mas);
+ trace_ma_write(__func__, mas, 0, entry);
+ mas_wr_store_entry(&wr_mas);
+ BUG_ON(mas_is_err(mas));
+ mas_destroy(mas);
+}
+
+/**
+ * mas_preallocate() - Preallocate enough nodes for a store operation
+ * @mas: The maple state
+ * @entry: The entry that will be stored
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated.
+ */
+int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
+{
+ int ret;
+
+ mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
+ mas->mas_flags |= MA_STATE_PREALLOC;
+ if (likely(!mas_is_err(mas)))
+ return 0;
+
+ mas_set_alloc_req(mas, 0);
+ ret = xa_err(mas->node);
+ mas_reset(mas);
+ mas_destroy(mas);
+ mas_reset(mas);
+ return ret;
+}
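+
+/*
+ * Example (an illustrative sketch only; "mm_mt", "vma", "start" and "end"
+ * are invented names): preallocate nodes up front so the store itself
+ * cannot fail, for callers that must not allocate at store time.
+ *
+ *  MA_STATE(mas, &mm_mt, start, end - 1);
+ *
+ *  if (mas_preallocate(&mas, vma, GFP_KERNEL))
+ *    return -ENOMEM;
+ *  ...
+ *  mas_store_prealloc(&mas, vma);
+ */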
+
+/*
+ * mas_destroy() - destroy a maple state.
+ * @mas: The maple state
+ *
+ * Upon completion, check the left-most node and rebalance against the node to
+ * the right if necessary. Frees any allocated nodes associated with this maple
+ * state.
+ */
+void mas_destroy(struct ma_state *mas)
+{
+ struct maple_alloc *node;
+
+ /*
+ * When using mas_for_each() to insert an expected number of elements,
+ * it is possible that the number inserted is less than the expected
+ * number. To fix an invalid final node, a check is performed here to
+ * rebalance the previous node with the final node.
+ */
+ if (mas->mas_flags & MA_STATE_REBALANCE) {
+ unsigned char end;
+
+ if (mas_is_start(mas))
+ mas_start(mas);
+
+ mtree_range_walk(mas);
+ end = mas_data_end(mas) + 1;
+ if (end < mt_min_slot_count(mas->node) - 1)
+ mas_destroy_rebalance(mas, end);
+
+ mas->mas_flags &= ~MA_STATE_REBALANCE;
+ }
+ mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
+
+ while (mas->alloc && !((unsigned long)mas->alloc & 0x1)) {
+ node = mas->alloc;
+ mas->alloc = node->slot[0];
+ if (node->node_count > 0)
+ mt_free_bulk(node->node_count,
+ (void __rcu **)&node->slot[1]);
+ kmem_cache_free(maple_node_cache, node);
+ }
+ mas->alloc = NULL;
+}
+
+/*
+ * mas_expected_entries() - Set the expected number of entries that will be inserted.
+ * @mas: The maple state
+ * @nr_entries: The number of expected entries.
+ *
+ * This will attempt to pre-allocate enough nodes to store the expected number
+ * of entries. The allocations will occur using the bulk allocator interface
+ * for speed. Please call mas_destroy() on the @mas after inserting the entries
+ * to ensure any unused nodes are freed.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated.
+ */
+int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
+{
+ int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
+ struct maple_enode *enode = mas->node;
+ int nr_nodes;
+ int ret;
+
+ /*
+ * Sometimes it is necessary to duplicate a tree to a new tree, such as
+ * forking a process and duplicating the VMAs from one tree to a new
+ * tree. When such a situation arises, it is known that the new tree is
+ * not going to be used until the entire tree is populated. For
+ * performance reasons, it is best to use a bulk load with RCU disabled.
+ * This allows for optimistic splitting that favours the left and reuse
+ * of nodes during the operation.
+ */
+
+ /* Optimize splitting for bulk insert in-order */
+ mas->mas_flags |= MA_STATE_BULK;
+
+ /*
+ * Avoid overflow, assume a gap between each entry and a trailing null.
+ * If this is wrong, it just means allocation can happen during
+ * insertion of entries.
+ */
+ nr_nodes = max(nr_entries, nr_entries * 2 + 1);
+ if (!mt_is_alloc(mas->tree))
+ nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
+
+ /* Leaves; reduce slots to keep space for expansion */
+ nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
+ /* Internal nodes */
+ nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
+ /* Add working room for split (2 nodes) + new parents */
+ mas_node_count(mas, nr_nodes + 3);
+
+ /* Detect if allocations run out */
+ mas->mas_flags |= MA_STATE_PREALLOC;
+
+ if (!mas_is_err(mas))
+ return 0;
+
+ ret = xa_err(mas->node);
+ mas->node = enode;
+ mas_destroy(mas);
+ return ret;
+
+}
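+
+/*
+ * Example (an illustrative sketch only; "new_tree", "nr", "starts", "ends"
+ * and "entries" are invented names): bulk-load entries in ascending order,
+ * as when duplicating a tree. Locking and error handling are omitted.
+ *
+ *  MA_STATE(mas, &new_tree, 0, 0);
+ *
+ *  mas_expected_entries(&mas, nr);
+ *  for (i = 0; i < nr; i++) {
+ *    mas_set_range(&mas, starts[i], ends[i]);
+ *    mas_store(&mas, entries[i]);
+ *  }
+ *  mas_destroy(&mas);
+ */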
+
+/**
+ * mas_next() - Get the next entry.
+ * @mas: The maple state
+ * @max: The maximum index to check.
+ *
+ * Returns the next entry after @mas->index.
+ * Must hold rcu_read_lock or the write lock.
+ * Can return the zero entry.
+ *
+ * Return: The next entry or %NULL
+ */
+void *mas_next(struct ma_state *mas, unsigned long max)
+{
+ if (mas_is_none(mas) || mas_is_paused(mas))
+ mas->node = MAS_START;
+
+ if (mas_is_start(mas))
+ mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
+
+ if (mas_is_ptr(mas)) {
+ if (!mas->index) {
+ mas->index = 1;
+ mas->last = ULONG_MAX;
+ }
+ return NULL;
+ }
+
+ if (mas->last == ULONG_MAX)
+ return NULL;
+
+ /* Retries on dead nodes handled by mas_next_entry */
+ return mas_next_entry(mas, max);
+}
+EXPORT_SYMBOL_GPL(mas_next);
+
+/**
+ * mt_next() - get the next value in the maple tree
+ * @mt: The maple tree
+ * @index: The start index
+ * @max: The maximum index to check
+ *
+ * Return: The entry at @index or higher, or %NULL if nothing is found.
+ */
+void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
+{
+ void *entry = NULL;
+ MA_STATE(mas, mt, index, index);
+
+ rcu_read_lock();
+ entry = mas_next(&mas, max);
+ rcu_read_unlock();
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mt_next);
+
+/**
+ * mas_prev() - Get the previous entry
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on
+ * unsearchable nodes.
+ *
+ * Return: the previous value or %NULL.
+ */
+void *mas_prev(struct ma_state *mas, unsigned long min)
+{
+ if (!mas->index) {
+ /* Nothing comes before 0 */
+ mas->last = 0;
+ return NULL;
+ }
+
+ if (unlikely(mas_is_ptr(mas)))
+ return NULL;
+
+ if (mas_is_none(mas) || mas_is_paused(mas))
+ mas->node = MAS_START;
+
+ if (mas_is_start(mas)) {
+ mas_walk(mas);
+ if (!mas->index)
+ return NULL;
+ }
+
+ if (mas_is_ptr(mas)) {
+ if (!mas->index) {
+ mas->last = 0;
+ return NULL;
+ }
+
+ mas->index = mas->last = 0;
+ return mas_root_locked(mas);
+ }
+ return mas_prev_entry(mas, min);
+}
+EXPORT_SYMBOL_GPL(mas_prev);
+
+/**
+ * mt_prev() - get the previous value in the maple tree
+ * @mt: The maple tree
+ * @index: The start index
+ * @min: The minimum index to check
+ *
+ * Return: The entry at @index or lower, or %NULL if nothing is found.
+ */
+void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
+{
+ void *entry = NULL;
+ MA_STATE(mas, mt, index, index);
+
+ rcu_read_lock();
+ entry = mas_prev(&mas, min);
+ rcu_read_unlock();
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mt_prev);
+
+/**
+ * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
+ * @mas: The maple state to pause
+ *
+ * Some users need to pause a walk and drop the lock they're holding in
+ * order to yield to a higher priority thread or carry out an operation
+ * on an entry. Those users should call this function before they drop
+ * the lock. It resets the @mas to be suitable for the next iteration
+ * of the loop after the user has reacquired the lock. If most entries
+ * found during a walk require you to call mas_pause(), the mt_for_each()
+ * iterator may be more appropriate.
+ *
+ */
+void mas_pause(struct ma_state *mas)
+{
+ mas->node = MAS_PAUSE;
+}
+EXPORT_SYMBOL_GPL(mas_pause);
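+
+/*
+ * Example (an illustrative sketch only; "tree" and process_entry() are
+ * invented names): pause a read-side walk to reschedule, then continue.
+ *
+ *  MA_STATE(mas, &tree, 0, 0);
+ *  void *entry;
+ *
+ *  rcu_read_lock();
+ *  mas_for_each(&mas, entry, ULONG_MAX) {
+ *    process_entry(entry);
+ *    if (need_resched()) {
+ *      mas_pause(&mas);
+ *      rcu_read_unlock();
+ *      cond_resched();
+ *      rcu_read_lock();
+ *    }
+ *  }
+ *  rcu_read_unlock();
+ */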
+
+/**
+ * mas_find() - On the first call, find the entry at or after mas->index up to
+ * %max. Otherwise, find the entry after mas->index.
+ * @mas: The maple state
+ * @max: The maximum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->node to MAS_NONE.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find(struct ma_state *mas, unsigned long max)
+{
+ if (unlikely(mas_is_paused(mas))) {
+ if (unlikely(mas->last == ULONG_MAX)) {
+ mas->node = MAS_NONE;
+ return NULL;
+ }
+ mas->node = MAS_START;
+ mas->index = ++mas->last;
+ }
+
+ if (unlikely(mas_is_start(mas))) {
+ /* First run or continue */
+ void *entry;
+
+ if (mas->index > max)
+ return NULL;
+
+ entry = mas_walk(mas);
+ if (entry)
+ return entry;
+ }
+
+ if (unlikely(!mas_searchable(mas)))
+ return NULL;
+
+ /* Retries on dead nodes handled by mas_next_entry */
+ return mas_next_entry(mas, max);
+}
+EXPORT_SYMBOL_GPL(mas_find);
+
+/**
+ * mas_find_rev() - On the first call, find the first non-null entry at or below
+ * mas->index down to %min. Otherwise find the first non-null entry below
+ * mas->index down to %min.
+ * @mas: The maple state
+ * @min: The minimum value to check.
+ *
+ * Must hold rcu_read_lock or the write lock.
+ * If an entry exists, last and index are updated accordingly.
+ * May set @mas->node to MAS_NONE.
+ *
+ * Return: The entry or %NULL.
+ */
+void *mas_find_rev(struct ma_state *mas, unsigned long min)
+{
+ if (unlikely(mas_is_paused(mas))) {
+ if (unlikely(mas->last == ULONG_MAX)) {
+ mas->node = MAS_NONE;
+ return NULL;
+ }
+ mas->node = MAS_START;
+ mas->last = --mas->index;
+ }
+
+ if (unlikely(mas_is_start(mas))) {
+ /* First run or continue */
+ void *entry;
+
+ if (mas->index < min)
+ return NULL;
+
+ entry = mas_walk(mas);
+ if (entry)
+ return entry;
+ }
+
+ if (unlikely(!mas_searchable(mas)))
+ return NULL;
+
+ if (mas->index < min)
+ return NULL;
+
+ /* Retries on dead nodes handled by mas_prev_entry */
+ return mas_prev_entry(mas, min);
+}
+EXPORT_SYMBOL_GPL(mas_find_rev);
+
+/**
+ * mas_erase() - Find the range in which index resides and erase the entire
+ * range.
+ * @mas: The maple state
+ *
+ * Must hold the write lock.
+ * Searches for @mas->index, sets @mas->index and @mas->last to the range and
+ * erases that range.
+ *
+ * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
+ */
+void *mas_erase(struct ma_state *mas)
+{
+ void *entry;
+ MA_WR_STATE(wr_mas, mas, NULL);
+
+ if (mas_is_none(mas) || mas_is_paused(mas))
+ mas->node = MAS_START;
+
+ /* Retry unnecessary when holding the write lock. */
+ entry = mas_state_walk(mas);
+ if (!entry)
+ return NULL;
+
+write_retry:
+ /* Must reset to ensure spanning writes of last slot are detected */
+ mas_reset(mas);
+ mas_wr_store_setup(&wr_mas);
+ mas_wr_store_entry(&wr_mas);
+ if (mas_nomem(mas, GFP_KERNEL))
+ goto write_retry;
+
+ return entry;
+}
+EXPORT_SYMBOL_GPL(mas_erase);
+
+/**
+ * mas_nomem() - Check if there was an error allocating and do the allocation
+ * if necessary. If there are allocations, then free them.
+ * @mas: The maple state
+ * @gfp: The GFP_FLAGS to use for allocations
+ * Return: true on allocation, false otherwise.
+ */
+bool mas_nomem(struct ma_state *mas, gfp_t gfp)
+ __must_hold(mas->tree->lock)
+{
+ if (likely(mas->node != MA_ERROR(-ENOMEM))) {
+ mas_destroy(mas);
+ return false;
+ }
+
+ if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
+ mtree_unlock(mas->tree);
+ mas_alloc_nodes(mas, gfp);
+ mtree_lock(mas->tree);
+ } else {
+ mas_alloc_nodes(mas, gfp);
+ }
+
+ if (!mas_allocated(mas))
+ return false;
+
+ mas->node = MAS_START;
+ return true;
+}
+
+void __init maple_tree_init(void)
+{
+ maple_node_cache = kmem_cache_create("maple_node",
+ sizeof(struct maple_node), sizeof(struct maple_node),
+ SLAB_PANIC, NULL);
+}
+
+/**
+ * mtree_load() - Load a value stored in a maple tree
+ * @mt: The maple tree
+ * @index: The index to load
+ *
+ * Return: the entry or %NULL
+ */
+void *mtree_load(struct maple_tree *mt, unsigned long index)
+{
+ MA_STATE(mas, mt, index, index);
+ void *entry;
+
+ trace_ma_read(__func__, &mas);
+ rcu_read_lock();
+retry:
+ entry = mas_start(&mas);
+ if (unlikely(mas_is_none(&mas)))
+ goto unlock;
+
+ if (unlikely(mas_is_ptr(&mas))) {
+ if (index)
+ entry = NULL;
+
+ goto unlock;
+ }
+
+ entry = mtree_lookup_walk(&mas);
+ if (!entry && unlikely(mas_is_start(&mas)))
+ goto retry;
+unlock:
+ rcu_read_unlock();
+ if (xa_is_zero(entry))
+ return NULL;
+
+ return entry;
+}
+EXPORT_SYMBOL(mtree_load);
+
+/**
+ * mtree_store_range() - Store an entry at a given range.
+ * @mt: The maple tree
+ * @index: The start of the range
+ * @last: The end of the range
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
+ * be allocated.
+ */
+int mtree_store_range(struct maple_tree *mt, unsigned long index,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ MA_STATE(mas, mt, index, last);
+ MA_WR_STATE(wr_mas, &mas, entry);
+
+ trace_ma_write(__func__, &mas, 0, entry);
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return -EINVAL;
+
+ if (index > last)
+ return -EINVAL;
+
+ mtree_lock(mt);
+retry:
+ mas_wr_store_entry(&wr_mas);
+ if (mas_nomem(&mas, gfp))
+ goto retry;
+
+ mtree_unlock(mt);
+ if (mas_is_err(&mas))
+ return xa_err(mas.node);
+
+ return 0;
+}
+EXPORT_SYMBOL(mtree_store_range);
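+
+/*
+ * Example (an illustrative sketch only; "tree", "ptr" and "entry" are
+ * invented names): the normal API handles its own locking and allocation.
+ *
+ *  DEFINE_MTREE(tree);
+ *
+ *  mtree_store_range(&tree, 100, 199, ptr, GFP_KERNEL);
+ *  entry = mtree_load(&tree, 150);
+ *
+ * mtree_load() above returns ptr, and mtree_erase(&tree, 150) would clear
+ * the entire 100-199 range.
+ */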
+
+/**
+ * mtree_store() - Store an entry at a given index.
+ * @mt: The maple tree
+ * @index: The index to store the value
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
+ * be allocated.
+ */
+int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
+ gfp_t gfp)
+{
+ return mtree_store_range(mt, index, index, entry, gfp);
+}
+EXPORT_SYMBOL(mtree_store);
+
+/**
+ * mtree_insert_range() - Insert an entry at a given range if there is no value.
+ * @mt: The maple tree
+ * @first: The start of the range
+ * @last: The end of the range
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
+ * request, -ENOMEM if memory could not be allocated.
+ */
+int mtree_insert_range(struct maple_tree *mt, unsigned long first,
+ unsigned long last, void *entry, gfp_t gfp)
+{
+ MA_STATE(ms, mt, first, last);
+
+ if (WARN_ON_ONCE(xa_is_advanced(entry)))
+ return -EINVAL;
+
+ if (first > last)
+ return -EINVAL;
+
+ mtree_lock(mt);
+retry:
+ mas_insert(&ms, entry);
+ if (mas_nomem(&ms, gfp))
+ goto retry;
+
+ mtree_unlock(mt);
+ if (mas_is_err(&ms))
+ return xa_err(ms.node);
+
+ return 0;
+}
+EXPORT_SYMBOL(mtree_insert_range);
+
+/**
+ * mtree_insert() - Insert an entry at a given index if there is no value.
+ * @mt: The maple tree
+ * @index: The index to store the value
+ * @entry: The entry to store
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
+ * request, -ENOMEM if memory could not be allocated.
+ */
+int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
+ gfp_t gfp)
+{
+ return mtree_insert_range(mt, index, index, entry, gfp);
+}
+EXPORT_SYMBOL(mtree_insert);
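+
+/*
+ * Example (an illustrative sketch only; "tree", "first" and "second" are
+ * invented names): unlike mtree_store(), an insert refuses to overwrite.
+ *
+ *  mtree_insert(&tree, 7, first, GFP_KERNEL);
+ *  ret = mtree_insert(&tree, 7, second, GFP_KERNEL);
+ *
+ * The second call returns -EEXIST and the tree still holds "first" at
+ * index 7.
+ */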
+
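+/*
+ * mtree_alloc_range() - Search an allocation tree for an empty area of
+ * @size within [@min, @max], store @entry over it, and write the start of
+ * the area to @startp. The search runs from the lowest address upwards.
+ * On failure a negative error code is returned (-EINVAL for an invalid
+ * request, -EBUSY when no large enough gap is found, -ENOMEM when node
+ * allocation fails).
+ */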
+int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp)
+{
+ int ret = 0;
+
+ MA_STATE(mas, mt, min, max - size);
+ if (!mt_is_alloc(mt))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(mt_is_reserved(entry)))
+ return -EINVAL;
+
+ if (min > max)
+ return -EINVAL;
+
+ if (max < size)
+ return -EINVAL;
+
+ if (!size)
+ return -EINVAL;
+
+ mtree_lock(mt);
+retry:
+ mas.offset = 0;
+ mas.index = min;
+ mas.last = max - size;
+ ret = mas_alloc(&mas, entry, size, startp);
+ if (mas_nomem(&mas, gfp))
+ goto retry;
+
+ mtree_unlock(mt);
+ return ret;
+}
+EXPORT_SYMBOL(mtree_alloc_range);
+
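+/*
+ * mtree_alloc_rrange() - Like mtree_alloc_range(), but search the
+ * allocation tree from the highest address downwards: find an empty area of
+ * @size within [@min, @max], store @entry over it, and write the start of
+ * the area to @startp. On failure a negative error code is returned
+ * (-EINVAL for an invalid request, -EBUSY when no large enough gap is
+ * found, -ENOMEM when node allocation fails).
+ */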
+int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
+ void *entry, unsigned long size, unsigned long min,
+ unsigned long max, gfp_t gfp)
+{
+ int ret = 0;
+
+ MA_STATE(mas, mt, min, max - size);
+ if (!mt_is_alloc(mt))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(mt_is_reserved(entry)))
+ return -EINVAL;
+
+ if (min >= max)
+ return -EINVAL;
+
+ if (max < size - 1)
+ return -EINVAL;
+
+ if (!size)
+ return -EINVAL;
+
+ mtree_lock(mt);
+retry:
+ ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
+ if (mas_nomem(&mas, gfp))
+ goto retry;
+
+ mtree_unlock(mt);
+ return ret;
+}
+EXPORT_SYMBOL(mtree_alloc_rrange);
+
+/**
+ * mtree_erase() - Find an index and erase the entire range.
+ * @mt: The maple tree
+ * @index: The index to erase
+ *
+ * Erasing is the same as a walk to an entry then a store of a NULL to that
+ * ENTIRE range. In fact, it is implemented as such using the advanced API.
+ *
+ * Return: The entry stored at the @index or %NULL
+ */
+void *mtree_erase(struct maple_tree *mt, unsigned long index)
+{
+ void *entry = NULL;
+
+ MA_STATE(mas, mt, index, index);
+ trace_ma_op(__func__, &mas);
+
+ mtree_lock(mt);
+ entry = mas_erase(&mas);
+ mtree_unlock(mt);
+
+ return entry;
+}
+EXPORT_SYMBOL(mtree_erase);
+
+/**
+ * __mt_destroy() - Walk and free all nodes of a locked maple tree.
+ * @mt: The maple tree
+ *
+ * Note: Does not handle locking.
+ */
+void __mt_destroy(struct maple_tree *mt)
+{
+ void *root = mt_root_locked(mt);
+
+ rcu_assign_pointer(mt->ma_root, NULL);
+ if (xa_is_node(root))
+ mte_destroy_walk(root, mt);
+
+ mt->ma_flags = 0;
+}
+EXPORT_SYMBOL_GPL(__mt_destroy);
+
+/**
+ * mtree_destroy() - Destroy a maple tree
+ * @mt: The maple tree
+ *
+ * Frees all resources used by the tree. Handles locking.
+ */
+void mtree_destroy(struct maple_tree *mt)
+{
+ mtree_lock(mt);
+ __mt_destroy(mt);
+ mtree_unlock(mt);
+}
+EXPORT_SYMBOL(mtree_destroy);
+
+/**
+ * mt_find() - Search from the start up until an entry is found.
+ * @mt: The maple tree
+ * @index: Pointer which contains the start location of the search
+ * @max: The maximum value to check
+ *
+ * Handles locking. @index will be incremented to one beyond the range.
+ *
+ * Return: The entry at or after the @index or %NULL
+ */
+void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
+{
+ MA_STATE(mas, mt, *index, *index);
+ void *entry;
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+ unsigned long copy = *index;
+#endif
+
+ trace_ma_read(__func__, &mas);
+
+ if ((*index) > max)
+ return NULL;
+
+ rcu_read_lock();
+retry:
+ entry = mas_state_walk(&mas);
+ if (mas_is_start(&mas))
+ goto retry;
+
+ if (unlikely(xa_is_zero(entry)))
+ entry = NULL;
+
+ if (entry)
+ goto unlock;
+
+ while (mas_searchable(&mas) && (mas.index < max)) {
+ entry = mas_next_entry(&mas, max);
+ if (likely(entry && !xa_is_zero(entry)))
+ break;
+ }
+
+ if (unlikely(xa_is_zero(entry)))
+ entry = NULL;
+unlock:
+ rcu_read_unlock();
+ if (likely(entry)) {
+ *index = mas.last + 1;
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+ if ((*index) && (*index) <= copy)
+ pr_err("index not increased! %lx <= %lx\n",
+ *index, copy);
+ MT_BUG_ON(mt, (*index) && ((*index) <= copy));
+#endif
+ }
+
+ return entry;
+}
+EXPORT_SYMBOL(mt_find);
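+
+/*
+ * Example (an illustrative sketch only; "tree" is an invented name):
+ * iterate over every entry by letting mt_find() advance @index past each
+ * returned range.
+ *
+ *  unsigned long index = 0;
+ *  void *entry;
+ *
+ *  while ((entry = mt_find(&tree, &index, ULONG_MAX)) != NULL)
+ *    pr_debug("entry %p ends at %lu\n", entry, index - 1);
+ */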
+
+/**
+ * mt_find_after() - Search from the start up until an entry is found.
+ * @mt: The maple tree
+ * @index: Pointer which contains the start location of the search
+ * @max: The maximum value to check
+ *
+ * Handles locking, detects wrapping on index == 0
+ *
+ * Return: The entry at or after the @index or %NULL
+ */
+void *mt_find_after(struct maple_tree *mt, unsigned long *index,
+ unsigned long max)
+{
+ if (!(*index))
+ return NULL;
+
+ return mt_find(mt, index, max);
+}
+EXPORT_SYMBOL(mt_find_after);
+
+#ifdef CONFIG_DEBUG_MAPLE_TREE
+atomic_t maple_tree_tests_run;
+EXPORT_SYMBOL_GPL(maple_tree_tests_run);
+atomic_t maple_tree_tests_passed;
+EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
+
+#ifndef __KERNEL__
+extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
+void mt_set_non_kernel(unsigned int val)
+{
+ kmem_cache_set_non_kernel(maple_node_cache, val);
+}
+
+extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
+unsigned long mt_get_alloc_size(void)
+{
+ return kmem_cache_get_alloc(maple_node_cache);
+}
+
+extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
+void mt_zero_nr_tallocated(void)
+{
+ kmem_cache_zero_nr_tallocated(maple_node_cache);
+}
+
+extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
+unsigned int mt_nr_tallocated(void)
+{
+ return kmem_cache_nr_tallocated(maple_node_cache);
+}
+
+extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
+unsigned int mt_nr_allocated(void)
+{
+ return kmem_cache_nr_allocated(maple_node_cache);
+}
+
+/*
+ * mas_dead_node() - Check if the maple state is pointing to a dead node.
+ * @mas: The maple state
+ * @index: The index to restore in @mas.
+ *
+ * Used in test code.
+ * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
+ */
+static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
+{
+ if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
+ return 0;
+
+ if (likely(!mte_dead_node(mas->node)))
+ return 0;
+
+ mas_rewalk(mas, index);
+ return 1;
+}
+#endif /* not defined __KERNEL__ */
+
+/*
+ * mas_get_slot() - Get the entry in the maple state node stored at @offset.
+ * @mas: The maple state
+ * @offset: The offset into the slot array to fetch.
+ *
+ * Return: The entry stored at @offset.
+ */
+static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
+ unsigned char offset)
+{
+ return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
+ offset);
+}
+
+
+/*
+ * mas_first_entry() - Go to the first leaf and find the first entry.
+ * @mas: the maple state.
+ * @mn: the maple node.
+ * @limit: the maximum index to check.
+ * @mt: the maple node type.
+ *
+ * Sets mas->offset to the offset of the entry and mas->index to the range
+ * minimum.
+ *
+ * Return: The first entry or MAS_NONE.
+ */
+static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
+ unsigned long limit, enum maple_type mt)
+
+{
+ unsigned long max;
+ unsigned long *pivots;
+ void __rcu **slots;
+ void *entry = NULL;
+
+ mas->index = mas->min;
+ if (mas->index > limit)
+ goto none;
+
+ max = mas->max;
+ mas->offset = 0;
+ while (likely(!ma_is_leaf(mt))) {
+ MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
+ slots = ma_slots(mn, mt);
+ pivots = ma_pivots(mn, mt);
+ max = pivots[0];
+ entry = mas_slot(mas, slots, 0);
+ if (unlikely(ma_dead_node(mn)))
+ return NULL;
+ mas->node = entry;
+ mn = mas_mn(mas);
+ mt = mte_node_type(mas->node);
+ }
+ MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
+
+ mas->max = max;
+ slots = ma_slots(mn, mt);
+ entry = mas_slot(mas, slots, 0);
+ if (unlikely(ma_dead_node(mn)))
+ return NULL;
+
+ /* Slot 0 or 1 must be set */
+ if (mas->index > limit)
+ goto none;
+
+ if (likely(entry))
+ return entry;
+
+ pivots = ma_pivots(mn, mt);
+ mas->index = pivots[0] + 1;
+ mas->offset = 1;
+ entry = mas_slot(mas, slots, 1);
+ if (unlikely(ma_dead_node(mn)))
+ return NULL;
+
+ if (mas->index > limit)
+ goto none;
+
+ if (likely(entry))
+ return entry;
+
+none:
+ if (likely(!ma_dead_node(mn)))
+ mas->node = MAS_NONE;
+ return NULL;
+}
+
+/* Depth first search, post-order */
+static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
+{
+
+ struct maple_enode *p = MAS_NONE, *mn = mas->node;
+ unsigned long p_min, p_max;
+
+ mas_next_node(mas, mas_mn(mas), max);
+ if (!mas_is_none(mas))
+ return;
+
+ if (mte_is_root(mn))
+ return;
+
+ mas->node = mn;
+ mas_ascend(mas);
+ while (mas->node != MAS_NONE) {
+ p = mas->node;
+ p_min = mas->min;
+ p_max = mas->max;
+ mas_prev_node(mas, 0);
+ }
+
+ if (p == MAS_NONE)
+ return;
+
+ mas->node = p;
+ mas->max = p_max;
+ mas->min = p_min;
+}
+
+/* Tree validations */
+static void mt_dump_node(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth);
+static void mt_dump_range(unsigned long min, unsigned long max,
+ unsigned int depth)
+{
+ static const char spaces[] = "                                ";
+
+ if (min == max)
+ pr_info("%.*s%lu: ", depth * 2, spaces, min);
+ else
+ pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
+}
+
+static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
+ unsigned int depth)
+{
+ mt_dump_range(min, max, depth);
+
+ if (xa_is_value(entry))
+ pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
+ xa_to_value(entry), entry);
+ else if (xa_is_zero(entry))
+ pr_cont("zero (%ld)\n", xa_to_internal(entry));
+ else if (mt_is_reserved(entry))
+ pr_cont("UNKNOWN ENTRY (%p)\n", entry);
+ else
+ pr_cont("%p\n", entry);
+}
+
+static void mt_dump_range64(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth)
+{
+ struct maple_range_64 *node = &mte_to_node(entry)->mr64;
+ bool leaf = mte_is_leaf(entry);
+ unsigned long first = min;
+ int i;
+
+ pr_cont(" contents: ");
+ for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++)
+ pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
+ pr_cont("%p\n", node->slot[i]);
+ for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
+ unsigned long last = max;
+
+ if (i < (MAPLE_RANGE64_SLOTS - 1))
+ last = node->pivot[i];
+ else if (!node->slot[i] && max != mt_max[mte_node_type(entry)])
+ break;
+ if (last == 0 && i > 0)
+ break;
+ if (leaf)
+ mt_dump_entry(mt_slot(mt, node->slot, i),
+ first, last, depth + 1);
+ else if (node->slot[i])
+ mt_dump_node(mt, mt_slot(mt, node->slot, i),
+ first, last, depth + 1);
+
+ if (last == max)
+ break;
+ if (last > max) {
+ pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ node, last, max, i);
+ break;
+ }
+ first = last + 1;
+ }
+}
+
+static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth)
+{
+ struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
+ bool leaf = mte_is_leaf(entry);
+ unsigned long first = min;
+ int i;
+
+ pr_cont(" contents: ");
+ for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
+ pr_cont("%lu ", node->gap[i]);
+ pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
+ for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
+ pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
+ pr_cont("%p\n", node->slot[i]);
+ for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
+ unsigned long last = max;
+
+ if (i < (MAPLE_ARANGE64_SLOTS - 1))
+ last = node->pivot[i];
+ else if (!node->slot[i])
+ break;
+ if (last == 0 && i > 0)
+ break;
+ if (leaf)
+ mt_dump_entry(mt_slot(mt, node->slot, i),
+ first, last, depth + 1);
+ else if (node->slot[i])
+ mt_dump_node(mt, mt_slot(mt, node->slot, i),
+ first, last, depth + 1);
+
+ if (last == max)
+ break;
+ if (last > max) {
+ pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
+ node, last, max, i);
+ break;
+ }
+ first = last + 1;
+ }
+}
+
+static void mt_dump_node(const struct maple_tree *mt, void *entry,
+ unsigned long min, unsigned long max, unsigned int depth)
+{
+ struct maple_node *node = mte_to_node(entry);
+ unsigned int type = mte_node_type(entry);
+ unsigned int i;
+
+ mt_dump_range(min, max, depth);
+
+ pr_cont("node %p depth %d type %d parent %p", node, depth, type,
+ node ? node->parent : NULL);
+ switch (type) {
+ case maple_dense:
+ pr_cont("\n");
+ for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
+ if (min + i > max)
+ pr_cont("OUT OF RANGE: ");
+ mt_dump_entry(mt_slot(mt, node->slot, i),
+ min + i, min + i, depth);
+ }
+ break;
+ case maple_leaf_64:
+ case maple_range_64:
+ mt_dump_range64(mt, entry, min, max, depth);
+ break;
+ case maple_arange_64:
+ mt_dump_arange64(mt, entry, min, max, depth);
+ break;
+
+ default:
+ pr_cont(" UNKNOWN TYPE\n");
+ }
+}
+
+void mt_dump(const struct maple_tree *mt)
+{
+ void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
+
+ pr_info("maple_tree(%p) flags %X, height %u root %p\n",
+ mt, mt->ma_flags, mt_height(mt), entry);
+ if (!xa_is_node(entry))
+ mt_dump_entry(entry, 0, 0, 0);
+ else if (entry)
+ mt_dump_node(mt, entry, 0, mt_max[mte_node_type(entry)], 0);
+}
+
+/*
+ * Calculate the maximum gap in a node and check if that's what is reported in
+ * the parent (unless root).
+ */
+static void mas_validate_gaps(struct ma_state *mas)
+{
+ struct maple_enode *mte = mas->node;
+ struct maple_node *p_mn;
+ unsigned long gap = 0, max_gap = 0;
+ unsigned long p_end, p_start = mas->min;
+ unsigned char p_slot;
+ unsigned long *gaps = NULL;
+ unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
+ int i;
+
+ if (ma_is_dense(mte_node_type(mte))) {
+ for (i = 0; i < mt_slot_count(mte); i++) {
+ if (mas_get_slot(mas, i)) {
+ if (gap > max_gap)
+ max_gap = gap;
+ gap = 0;
+ continue;
+ }
+ gap++;
+ }
+ goto counted;
+ }
+
+ gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
+ for (i = 0; i < mt_slot_count(mte); i++) {
+ p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
+
+ if (!gaps) {
+ if (mas_get_slot(mas, i)) {
+ gap = 0;
+ goto not_empty;
+ }
+
+ gap += p_end - p_start + 1;
+ } else {
+ void *entry = mas_get_slot(mas, i);
+
+ gap = gaps[i];
+ if (!entry) {
+ if (gap != p_end - p_start + 1) {
+ pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
+ mas_mn(mas), i,
+ mas_get_slot(mas, i), gap,
+ p_end, p_start);
+ mt_dump(mas->tree);
+
+ MT_BUG_ON(mas->tree,
+ gap != p_end - p_start + 1);
+ }
+ } else {
+ if (gap > p_end - p_start + 1) {
+ pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
+ mas_mn(mas), i, gap, p_end, p_start,
+ p_end - p_start + 1);
+ MT_BUG_ON(mas->tree,
+ gap > p_end - p_start + 1);
+ }
+ }
+ }
+
+ if (gap > max_gap)
+ max_gap = gap;
+not_empty:
+ p_start = p_end + 1;
+ if (p_end >= mas->max)
+ break;
+ }
+
+counted:
+ if (mte_is_root(mte))
+ return;
+
+ p_slot = mte_parent_slot(mas->node);
+ p_mn = mte_parent(mte);
+ MT_BUG_ON(mas->tree, max_gap > mas->max);
+ if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) {
+ pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
+ mt_dump(mas->tree);
+ }
+
+ MT_BUG_ON(mas->tree,
+ ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap);
+}
+
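+/*
+ * Verify this node appears in its parent at the recorded parent slot and in
+ * no other slot.
+ */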
+static void mas_validate_parent_slot(struct ma_state *mas)
+{
+ struct maple_node *parent;
+ struct maple_enode *node;
+ enum maple_type p_type = mas_parent_enum(mas, mas->node);
+ unsigned char p_slot = mte_parent_slot(mas->node);
+ void __rcu **slots;
+ int i;
+
+ if (mte_is_root(mas->node))
+ return;
+
+ parent = mte_parent(mas->node);
+ slots = ma_slots(parent, p_type);
+ MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
+
+ /* Check prev/next parent slot for duplicate node entry */
+
+ for (i = 0; i < mt_slots[p_type]; i++) {
+ node = mas_slot(mas, slots, i);
+ if (i == p_slot) {
+ if (node != mas->node)
+ pr_err("parent %p[%u] does not have %p\n",
+ parent, i, mas_mn(mas));
+ MT_BUG_ON(mas->tree, node != mas->node);
+ } else if (node == mas->node) {
+ pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
+ mas_mn(mas), parent, i, p_slot);
+ MT_BUG_ON(mas->tree, node == mas->node);
+ }
+ }
+}
+
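+/*
+ * Verify every child of this node records this node as its parent along with
+ * the correct parent slot.
+ */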
+static void mas_validate_child_slot(struct ma_state *mas)
+{
+ enum maple_type type = mte_node_type(mas->node);
+ void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
+ unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
+ struct maple_enode *child;
+ unsigned char i;
+
+ if (mte_is_leaf(mas->node))
+ return;
+
+ for (i = 0; i < mt_slots[type]; i++) {
+ child = mas_slot(mas, slots, i);
+ if (!pivots[i] || pivots[i] == mas->max)
+ break;
+
+ if (!child)
+ break;
+
+ if (mte_parent_slot(child) != i) {
+ pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
+ mas_mn(mas), i, mte_to_node(child),
+ mte_parent_slot(child));
+ MT_BUG_ON(mas->tree, 1);
+ }
+
+ if (mte_parent(child) != mte_to_node(mas->node)) {
+ pr_err("child %p has parent %p not %p\n",
+ mte_to_node(child), mte_parent(child),
+ mte_to_node(mas->node));
+ MT_BUG_ON(mas->tree, 1);
+ }
+ }
+}
+
+/*
+ * Validate all pivots are within mas->min and mas->max.
+ */
+static void mas_validate_limits(struct ma_state *mas)
+{
+ int i;
+ unsigned long prev_piv = 0;
+ enum maple_type type = mte_node_type(mas->node);
+ void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
+ unsigned long *pivots = ma_pivots(mas_mn(mas), type);
+
+ /* all limits are fine here. */
+ if (mte_is_root(mas->node))
+ return;
+
+ for (i = 0; i < mt_slots[type]; i++) {
+ unsigned long piv;
+
+ piv = mas_safe_pivot(mas, pivots, i, type);
+
+ if (!piv && (i != 0))
+ break;
+
+ if (!mte_is_leaf(mas->node)) {
+ void *entry = mas_slot(mas, slots, i);
+
+ if (!entry)
+ pr_err("%p[%u] cannot be null\n",
+ mas_mn(mas), i);
+
+ MT_BUG_ON(mas->tree, !entry);
+ }
+
+ if (prev_piv > piv) {
+ pr_err("%p[%u] piv %lu < prev_piv %lu\n",
+ mas_mn(mas), i, piv, prev_piv);
+ MT_BUG_ON(mas->tree, piv < prev_piv);
+ }
+
+ if (piv < mas->min) {
+ pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
+ piv, mas->min);
+ MT_BUG_ON(mas->tree, piv < mas->min);
+ }
+ if (piv > mas->max) {
+ pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
+ piv, mas->max);
+ MT_BUG_ON(mas->tree, piv > mas->max);
+ }
+ prev_piv = piv;
+ if (piv == mas->max)
+ break;
+ }
+ for (i += 1; i < mt_slots[type]; i++) {
+ void *entry = mas_slot(mas, slots, i);
+
+ if (entry && (i != mt_slots[type] - 1)) {
+ pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
+ i, entry);
+ MT_BUG_ON(mas->tree, entry != NULL);
+ }
+
+ if (i < mt_pivots[type]) {
+ unsigned long piv = pivots[i];
+
+ if (!piv)
+ continue;
+
+ pr_err("%p[%u] should not have piv %lu\n",
+ mas_mn(mas), i, piv);
+ MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
+ }
+ }
+}
+
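+/*
+ * Walk the leaves and verify that no two consecutive slots contain NULL.
+ */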
+static void mt_validate_nulls(struct maple_tree *mt)
+{
+ void *entry, *last = (void *)1;
+ unsigned char offset = 0;
+ void __rcu **slots;
+ MA_STATE(mas, mt, 0, 0);
+
+ mas_start(&mas);
+ if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
+ return;
+
+ while (!mte_is_leaf(mas.node))
+ mas_descend(&mas);
+
+ slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
+ do {
+ entry = mas_slot(&mas, slots, offset);
+ if (!last && !entry) {
+ pr_err("Sequential nulls end at %p[%u]\n",
+ mas_mn(&mas), offset);
+ }
+ MT_BUG_ON(mt, !last && !entry);
+ last = entry;
+ if (offset == mas_data_end(&mas)) {
+ mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
+ if (mas_is_none(&mas))
+ return;
+ offset = 0;
+ slots = ma_slots(mte_to_node(mas.node),
+ mte_node_type(mas.node));
+ } else {
+ offset++;
+ }
+
+ } while (!mas_is_none(&mas));
+}
+
+/*
+ * Validate a maple tree by checking:
+ * 1. The limits (pivots are within mas->min to mas->max)
+ * 2. The gap is correctly set in the parents
+ * 3. The parent and child slots agree
+ * 4. No sequential NULL entries exist in the leaves
+ */
+void mt_validate(struct maple_tree *mt)
+{
+ unsigned char end;
+
+ MA_STATE(mas, mt, 0, 0);
+ rcu_read_lock();
+ mas_start(&mas);
+ if (!mas_searchable(&mas))
+ goto done;
+
+ mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
+ while (!mas_is_none(&mas)) {
+ MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
+ if (!mte_is_root(mas.node)) {
+ end = mas_data_end(&mas);
+ if ((end < mt_min_slot_count(mas.node)) &&
+ (mas.max != ULONG_MAX)) {
+ pr_err("Invalid size %u of %p\n", end,
+ mas_mn(&mas));
+ MT_BUG_ON(mas.tree, 1);
+ }
+
+ }
+ mas_validate_parent_slot(&mas);
+ mas_validate_child_slot(&mas);
+ mas_validate_limits(&mas);
+ if (mt_is_alloc(mt))
+ mas_validate_gaps(&mas);
+ mas_dfs_postorder(&mas, ULONG_MAX);
+ }
+ mt_validate_nulls(mt);
+done:
+ rcu_read_unlock();
+
+}
+
+#endif /* CONFIG_DEBUG_MAPLE_TREE */
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 1c26c14ffbb9..0d7585cde2a6 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -8,13 +8,13 @@
#include <linux/mm.h>
#include <linux/cma.h>
-void show_mem(unsigned int filter, nodemask_t *nodemask)
+void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
pg_data_t *pgdat;
unsigned long total = 0, reserved = 0, highmem = 0;
printk("Mem-Info:\n");
- show_free_areas(filter, nodemask);
+ __show_free_areas(filter, nodemask, max_zone_idx);
for_each_online_pgdat(pgdat) {
int zoneid;
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index e73fda23388d..79e894cf8406 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -43,7 +43,8 @@
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
- STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
+ STACK_ALLOC_NULL_PROTECTION_BITS - \
+ STACK_ALLOC_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
@@ -56,6 +57,7 @@ union handle_parts {
u32 slabindex : STACK_ALLOC_INDEX_BITS;
u32 offset : STACK_ALLOC_OFFSET_BITS;
u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
+ u32 extra : STACK_DEPOT_EXTRA_BITS;
};
};
@@ -77,6 +79,14 @@ static int next_slab_inited;
static size_t depot_offset;
static DEFINE_RAW_SPINLOCK(depot_lock);
+unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
+{
+ union handle_parts parts = { .handle = handle };
+
+ return parts.extra;
+}
+EXPORT_SYMBOL(stack_depot_get_extra_bits);
+
static bool init_stack_slab(void **prealloc)
{
if (!*prealloc)
@@ -140,6 +150,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
stack->handle.slabindex = depot_index;
stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
stack->handle.valid = 1;
+ stack->handle.extra = 0;
memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
depot_offset += required_size;
@@ -382,6 +393,7 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch);
*
* @entries: Pointer to storage array
* @nr_entries: Size of the storage array
+ * @extra_bits: Flags to store in unused bits of depot_stack_handle_t
* @alloc_flags: Allocation gfp flags
* @can_alloc: Allocate stack slabs (increased chance of failure if false)
*
@@ -393,6 +405,10 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch);
* If the stack trace in @entries is from an interrupt, only the portion up to
* interrupt entry is saved.
*
+ * Additional opaque flags can be passed in @extra_bits, stored in the unused
+ * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
+ * without calling stack_depot_fetch().
+ *
* Context: Any context, but setting @can_alloc to %false is required if
* alloc_pages() cannot be used from the current context. Currently
* this is the case from contexts where neither %GFP_ATOMIC nor
@@ -402,10 +418,11 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch);
*/
depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
+ unsigned int extra_bits,
gfp_t alloc_flags, bool can_alloc)
{
struct stack_record *found = NULL, **bucket;
- depot_stack_handle_t retval = 0;
+ union handle_parts retval = { .handle = 0 };
struct page *page = NULL;
void *prealloc = NULL;
unsigned long flags;
@@ -489,9 +506,11 @@ exit:
free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
}
if (found)
- retval = found->handle.handle;
+ retval.handle = found->handle.handle;
fast_exit:
- return retval;
+ retval.extra = extra_bits;
+
+ return retval.handle;
}
EXPORT_SYMBOL_GPL(__stack_depot_save);
@@ -511,6 +530,6 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags)
{
- return __stack_depot_save(entries, nr_entries, alloc_flags, true);
+ return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
diff --git a/lib/string.c b/lib/string.c
index 6f334420f687..3371d26a0e39 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -197,6 +197,14 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
max = 0;
#endif
+ /*
+ * read_word_at_a_time() below may read uninitialized bytes after the
+ * trailing zero and use them in comparisons. Disable this optimization
+ * under KMSAN to prevent false positive reports.
+ */
+ if (IS_ENABLED(CONFIG_KMSAN))
+ max = 0;
+
while (max >= sizeof(unsigned long)) {
unsigned long c, data;
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index e3965cafd27c..6a33f6b1b465 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -107,8 +107,8 @@ struct dmirror_chunk {
*/
struct dmirror_device {
struct cdev cdevice;
- struct hmm_devmem *devmem;
unsigned int zone_device_type;
+ struct device device;
unsigned int devmem_capacity;
unsigned int devmem_count;
@@ -1390,7 +1390,14 @@ static int dmirror_device_init(struct dmirror_device *mdevice, int id)
cdev_init(&mdevice->cdevice, &dmirror_fops);
mdevice->cdevice.owner = THIS_MODULE;
- ret = cdev_add(&mdevice->cdevice, dev, 1);
+ device_initialize(&mdevice->device);
+ mdevice->device.devt = dev;
+
+ ret = dev_set_name(&mdevice->device, "hmm_dmirror%u", id);
+ if (ret)
+ return ret;
+
+ ret = cdev_device_add(&mdevice->cdevice, &mdevice->device);
if (ret)
return ret;
@@ -1416,7 +1423,7 @@ static void dmirror_device_remove(struct dmirror_device *mdevice)
kfree(mdevice->devmem_chunks);
}
- cdev_del(&mdevice->cdevice);
+ cdev_device_del(&mdevice->cdevice, &mdevice->device);
}
static int __init hmm_dmirror_init(void)
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
new file mode 100644
index 000000000000..4f69e009a015
--- /dev/null
+++ b/lib/test_maple_tree.c
@@ -0,0 +1,38307 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_maple_tree.c: Test the maple tree API
+ * Copyright (c) 2018 Liam R. Howlett
+ * Author: Liam R. Howlett <Liam.Howlett@Oracle.com>
+ */
+
+#include <linux/maple_tree.h>
+#include <linux/module.h>
+#include <stdlib.h>
+#include <time.h>
+
+#define MTREE_ALLOC_MAX 0x2000000000000Ul
+#define CONFIG_DEBUG_MAPLE_TREE
+#define CONFIG_MAPLE_SEARCH
+/* #define BENCH_SLOT_STORE */
+/* #define BENCH_NODE_STORE */
+/* #define BENCH_AWALK */
+/* #define BENCH_WALK */
+/* #define BENCH_MT_FOR_EACH */
+/* #define BENCH_FORK */
+static
+int mtree_insert_index(struct maple_tree *mt, unsigned long index, gfp_t gfp)
+{
+ return mtree_insert(mt, index, xa_mk_value(index & LONG_MAX), gfp);
+}
+
+static void mtree_erase_index(struct maple_tree *mt, unsigned long index)
+{
+ MT_BUG_ON(mt, mtree_erase(mt, index) != xa_mk_value(index & LONG_MAX));
+ MT_BUG_ON(mt, mtree_load(mt, index) != NULL);
+}
+
+static int mtree_test_insert(struct maple_tree *mt, unsigned long index,
+ void *ptr)
+{
+ return mtree_insert(mt, index, ptr, GFP_KERNEL);
+}
+
+static int mtree_test_store_range(struct maple_tree *mt, unsigned long start,
+ unsigned long end, void *ptr)
+{
+ return mtree_store_range(mt, start, end, ptr, GFP_KERNEL);
+}
+
+static int mtree_test_store(struct maple_tree *mt, unsigned long start,
+ void *ptr)
+{
+ return mtree_test_store_range(mt, start, start, ptr);
+}
+
+static int mtree_test_insert_range(struct maple_tree *mt, unsigned long start,
+ unsigned long end, void *ptr)
+{
+ return mtree_insert_range(mt, start, end, ptr, GFP_KERNEL);
+}
+
+static void *mtree_test_load(struct maple_tree *mt, unsigned long index)
+{
+ return mtree_load(mt, index);
+}
+
+static void *mtree_test_erase(struct maple_tree *mt, unsigned long index)
+{
+ return mtree_erase(mt, index);
+}
+
+static noinline void check_mtree_alloc_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, unsigned long size,
+ unsigned long expected, int eret, void *ptr)
+{
+
+ unsigned long result = expected + 1;
+ int ret;
+
+ ret = mtree_alloc_range(mt, &result, ptr, size, start, end,
+ GFP_KERNEL);
+ MT_BUG_ON(mt, ret != eret);
+ if (ret)
+ return;
+
+ MT_BUG_ON(mt, result != expected);
+}
+
+static noinline void check_mtree_alloc_rrange(struct maple_tree *mt,
+ unsigned long start, unsigned long end, unsigned long size,
+ unsigned long expected, int eret, void *ptr)
+{
+
+ unsigned long result = expected + 1;
+ int ret;
+
+ ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end - 1,
+ GFP_KERNEL);
+ MT_BUG_ON(mt, ret != eret);
+ if (ret)
+ return;
+
+ MT_BUG_ON(mt, result != expected);
+}
+
+static noinline void check_load(struct maple_tree *mt, unsigned long index,
+ void *ptr)
+{
+ void *ret = mtree_test_load(mt, index);
+
+ if (ret != ptr)
+ pr_err("Load %lu returned %p expect %p\n", index, ret, ptr);
+ MT_BUG_ON(mt, ret != ptr);
+}
+
+static noinline void check_store_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr, int expected)
+{
+ int ret = -EINVAL;
+ unsigned long i;
+
+ ret = mtree_test_store_range(mt, start, end, ptr);
+ MT_BUG_ON(mt, ret != expected);
+
+ if (ret)
+ return;
+
+ for (i = start; i <= end; i++)
+ check_load(mt, i, ptr);
+}
+
+static noinline void check_insert_range(struct maple_tree *mt,
+ unsigned long start, unsigned long end, void *ptr, int expected)
+{
+ int ret = -EINVAL;
+ unsigned long i;
+
+ ret = mtree_test_insert_range(mt, start, end, ptr);
+ MT_BUG_ON(mt, ret != expected);
+
+ if (ret)
+ return;
+
+ for (i = start; i <= end; i++)
+ check_load(mt, i, ptr);
+}
+
+static noinline void check_insert(struct maple_tree *mt, unsigned long index,
+ void *ptr)
+{
+ int ret = -EINVAL;
+
+ ret = mtree_test_insert(mt, index, ptr);
+ MT_BUG_ON(mt, ret != 0);
+}
+
+static noinline void check_erase(struct maple_tree *mt, unsigned long index,
+ void *ptr)
+{
+ MT_BUG_ON(mt, mtree_test_erase(mt, index) != ptr);
+}
+
+static noinline void check_dup_insert(struct maple_tree *mt,
+ unsigned long index, void *ptr)
+{
+ int ret = -EINVAL;
+
+ ret = mtree_test_insert(mt, index, ptr);
+ MT_BUG_ON(mt, ret != -EEXIST);
+}
+
+
+static noinline
+void check_index_load(struct maple_tree *mt, unsigned long index)
+{
+ return check_load(mt, index, xa_mk_value(index & LONG_MAX));
+}
+
+static noinline void check_nomem(struct maple_tree *mt)
+{
+ MA_STATE(ms, mt, 1, 1);
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ /* Ensure no bypassing of allocation failures */
+ mt_set_non_kernel(0);
+
+ /* Storing something at 1 requires memory allocation */
+ MT_BUG_ON(mt, mtree_insert(mt, 1, &ms, GFP_ATOMIC) != -ENOMEM);
+ /* Storing something at 0 does not */
+ MT_BUG_ON(mt, mtree_insert(mt, 0, &ms, GFP_ATOMIC) != 0);
+
+ /*
+ * Simulate two threads racing; the first one fails to allocate
+ * memory to insert an entry at 1, then the second one succeeds
+ * in allocating memory to insert an entry at 2. The first one
+ * then needs to free the node it allocated. LeakSanitizer will
+ * notice this, as will the 'nr_allocated' debugging aid in the
+ * userspace test suite.
+ */
+ mtree_lock(mt);
+ mas_store(&ms, &ms); /* insert 1 -> &ms, fails. */
+ MT_BUG_ON(mt, ms.node != MA_ERROR(-ENOMEM));
+ mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */
+ MT_BUG_ON(mt, ms.node != MAS_START);
+ mtree_unlock(mt);
+ MT_BUG_ON(mt, mtree_insert(mt, 2, mt, GFP_KERNEL) != 0);
+ mtree_lock(mt);
+ mas_store(&ms, &ms); /* insert 1 -> &ms */
+ mas_nomem(&ms, GFP_KERNEL); /* Node allocated in here. */
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+}
+
+static inline int not_empty(struct maple_node *node)
+{
+ int i;
+
+ if (node->parent)
+ return 1;
+
+ for (i = 0; i < ARRAY_SIZE(node->slot); i++)
+ if (node->slot[i])
+ return 1;
+
+ return 0;
+}
+
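+/*
+ * Exercise the maple state node allocator: request, pop, push and free nodes
+ * in various combinations while verifying the allocation counters.
+ */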
+static noinline void check_new_node(struct maple_tree *mt)
+{
+
+ struct maple_node *mn, *mn2, *mn3;
+ struct maple_alloc *smn;
+ struct maple_node *nodes[100];
+ int i, j, total;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ /* Try allocating 3 nodes */
+ mtree_lock(mt);
+ /* request 3 nodes to be allocated. */
+ mas_node_count(&mas, 3);
+ /* Allocation request of 3. */
+ MT_BUG_ON(mt, mas_alloc_req(&mas) != 3);
+ /* Allocate failed. */
+ MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+
+ MT_BUG_ON(mt, mas_allocated(&mas) != 3);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
+ MT_BUG_ON(mt, mn == NULL);
+ MT_BUG_ON(mt, mas.alloc == NULL);
+ MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
+ mas_push_node(&mas, mn);
+ mas_nomem(&mas, GFP_KERNEL); /* free */
+ mtree_unlock(mt);
+
+
+ /* Try allocating 1 node, then 2 more */
+ mtree_lock(mt);
+ /* Set allocation request to 1. */
+ mas_set_alloc_req(&mas, 1);
+ /* Check Allocation request of 1. */
+ MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
+ mas_set_err(&mas, -ENOMEM);
+ /* Validate allocation request. */
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ /* Eat the requested node. */
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
+ MT_BUG_ON(mt, mn == NULL);
+ MT_BUG_ON(mt, mn->slot[0] != NULL);
+ MT_BUG_ON(mt, mn->slot[1] != NULL);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+ ma_free_rcu(mn);
+ mas.node = MAS_START;
+ mas_nomem(&mas, GFP_KERNEL);
+ /* Allocate 3 nodes, will fail. */
+ mas_node_count(&mas, 3);
+ /* Drop the lock and allocate 3 nodes. */
+ mas_nomem(&mas, GFP_KERNEL);
+ /* Ensure 3 are allocated. */
+ MT_BUG_ON(mt, mas_allocated(&mas) != 3);
+ /* Allocation request of 0. */
+ MT_BUG_ON(mt, mas_alloc_req(&mas) != 0);
+
+ MT_BUG_ON(mt, mas.alloc == NULL);
+ MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
+ MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
+ /* Ensure we counted 3. */
+ MT_BUG_ON(mt, mas_allocated(&mas) != 3);
+ /* Free. */
+ mas_nomem(&mas, GFP_KERNEL);
+
+ /* Set allocation request to 1. */
+ mas_set_alloc_req(&mas, 1);
+ MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
+ mas_set_err(&mas, -ENOMEM);
+ /* Validate allocation request. */
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ MT_BUG_ON(mt, mas_allocated(&mas) != 1);
+ /* Check the node is only one node. */
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ MT_BUG_ON(mt, mn == NULL);
+ MT_BUG_ON(mt, mn->slot[0] != NULL);
+ MT_BUG_ON(mt, mn->slot[1] != NULL);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ mas_push_node(&mas, mn);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 1);
+ MT_BUG_ON(mt, mas.alloc->node_count);
+
+ mas_set_alloc_req(&mas, 2); /* request 2 more. */
+ MT_BUG_ON(mt, mas_alloc_req(&mas) != 2);
+ mas_set_err(&mas, -ENOMEM);
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ MT_BUG_ON(mt, mas_allocated(&mas) != 3);
+ MT_BUG_ON(mt, mas.alloc == NULL);
+ MT_BUG_ON(mt, mas.alloc->slot[0] == NULL);
+ MT_BUG_ON(mt, mas.alloc->slot[1] == NULL);
+ for (i = 2; i >= 0; i--) {
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != i);
+ MT_BUG_ON(mt, !mn);
+ MT_BUG_ON(mt, not_empty(mn));
+ ma_free_rcu(mn);
+ }
+
+ total = 64;
+ mas_set_alloc_req(&mas, total); /* request 64 nodes. */
+ MT_BUG_ON(mt, mas_alloc_req(&mas) != total);
+ mas_set_err(&mas, -ENOMEM);
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ for (i = total; i > 0; i--) {
+ unsigned int e = 0; /* expected node_count */
+
+ if (i >= 35)
+ e = i - 35;
+ else if (i >= 5)
+ e = i - 5;
+ else if (i >= 2)
+ e = i - 2;
+ MT_BUG_ON(mt, mas.alloc->node_count != e);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - 1);
+ MT_BUG_ON(mt, !mn);
+ ma_free_rcu(mn);
+ }
+
+ total = 100;
+ for (i = 1; i < total; i++) {
+ mas_set_alloc_req(&mas, i);
+ mas_set_err(&mas, -ENOMEM);
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ for (j = i; j > 0; j--) {
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
+ MT_BUG_ON(mt, !mn);
+ MT_BUG_ON(mt, not_empty(mn));
+ mas_push_node(&mas, mn);
+ MT_BUG_ON(mt, mas_allocated(&mas) != j);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
+ MT_BUG_ON(mt, mas_allocated(&mas) != j - 1);
+ ma_free_rcu(mn);
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+ mas_set_alloc_req(&mas, i);
+ mas_set_err(&mas, -ENOMEM);
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ for (j = 0; j <= i/2; j++) {
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+ nodes[j] = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
+ }
+
+ while (j) {
+ j--;
+ mas_push_node(&mas, nodes[j]);
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas) != i);
+ for (j = 0; j <= i/2; j++) {
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - j);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
+ ma_free_rcu(mn);
+ MT_BUG_ON(mt, mas_allocated(&mas) != i - j - 1);
+ }
+ MT_BUG_ON(mt, mas_nomem(&mas, GFP_KERNEL));
+
+ }
+
+ /* Set allocation request. */
+ total = 500;
+ mas_node_count(&mas, total);
+ /* Drop the lock and allocate the nodes. */
+ mas_nomem(&mas, GFP_KERNEL);
+ MT_BUG_ON(mt, !mas.alloc);
+ i = 1;
+ smn = mas.alloc;
+ while (i < total) {
+ for (j = 0; j < MAPLE_ALLOC_SLOTS; j++) {
+ i++;
+ MT_BUG_ON(mt, !smn->slot[j]);
+ if (i == total)
+ break;
+ }
+ smn = smn->slot[0]; /* next. */
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas) != total);
+ mas_nomem(&mas, GFP_KERNEL); /* Free. */
+
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ for (i = 1; i < 128; i++) {
+ mas_node_count(&mas, i); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */
+ for (j = i; j > 0; j--) { /* Free the requests */
+ mn = mas_pop_node(&mas); /* get the next node. */
+ MT_BUG_ON(mt, mn == NULL);
+ MT_BUG_ON(mt, not_empty(mn));
+ ma_free_rcu(mn);
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ }
+
+ for (i = 1; i < MAPLE_NODE_MASK + 1; i++) {
+ MA_STATE(mas2, mt, 0, 0);
+ mas_node_count(&mas, i); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ MT_BUG_ON(mt, mas_allocated(&mas) != i); /* check request filled */
+ for (j = 1; j <= i; j++) { /* Move the allocations to mas2 */
+ mn = mas_pop_node(&mas); /* get the next node. */
+ MT_BUG_ON(mt, mn == NULL);
+ MT_BUG_ON(mt, not_empty(mn));
+ mas_push_node(&mas2, mn);
+ MT_BUG_ON(mt, mas_allocated(&mas2) != j);
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ MT_BUG_ON(mt, mas_allocated(&mas2) != i);
+
+ for (j = i; j > 0; j--) { /* Free the requests */
+ MT_BUG_ON(mt, mas_allocated(&mas2) != j);
+ mn = mas_pop_node(&mas2); /* get the next node. */
+ MT_BUG_ON(mt, mn == NULL);
+ MT_BUG_ON(mt, not_empty(mn));
+ ma_free_rcu(mn);
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas2) != 0);
+ }
+
+
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 1); /* Request */
+ MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
+ MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
+
+ mn = mas_pop_node(&mas); /* get the next node. */
+ MT_BUG_ON(mt, mn == NULL);
+ MT_BUG_ON(mt, not_empty(mn));
+ MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS);
+ MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 2);
+
+ mas_push_node(&mas, mn);
+ MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
+ MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
+
+ /* Check the limit of pop/push/pop */
+ mas_node_count(&mas, MAPLE_ALLOC_SLOTS + 2); /* Request */
+ MT_BUG_ON(mt, mas_alloc_req(&mas) != 1);
+ MT_BUG_ON(mt, mas.node != MA_ERROR(-ENOMEM));
+ MT_BUG_ON(mt, !mas_nomem(&mas, GFP_KERNEL));
+ MT_BUG_ON(mt, mas_alloc_req(&mas));
+ MT_BUG_ON(mt, mas.alloc->node_count);
+ MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
+ MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 1);
+ MT_BUG_ON(mt, mas.alloc->node_count != MAPLE_ALLOC_SLOTS - 1);
+ mas_push_node(&mas, mn);
+ MT_BUG_ON(mt, mas.alloc->node_count);
+ MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS + 2);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
+ ma_free_rcu(mn);
+ for (i = 1; i <= MAPLE_ALLOC_SLOTS + 1; i++) {
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, not_empty(mn));
+ ma_free_rcu(mn);
+ }
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+
+ for (i = 3; i < MAPLE_NODE_MASK * 3; i++) {
+ mas.node = MA_ERROR(-ENOMEM);
+ mas_node_count(&mas, i); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ mn = mas_pop_node(&mas); /* get the next node. */
+ mas_push_node(&mas, mn); /* put it back */
+ mas_destroy(&mas);
+
+ mas.node = MA_ERROR(-ENOMEM);
+ mas_node_count(&mas, i); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ mn = mas_pop_node(&mas); /* get the next node. */
+ mn2 = mas_pop_node(&mas); /* get the next node. */
+ mas_push_node(&mas, mn); /* put them back */
+ mas_push_node(&mas, mn2);
+ mas_destroy(&mas);
+
+ mas.node = MA_ERROR(-ENOMEM);
+ mas_node_count(&mas, i); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ mn = mas_pop_node(&mas); /* get the next node. */
+ mn2 = mas_pop_node(&mas); /* get the next node. */
+ mn3 = mas_pop_node(&mas); /* get the next node. */
+ mas_push_node(&mas, mn); /* put them back */
+ mas_push_node(&mas, mn2);
+ mas_push_node(&mas, mn3);
+ mas_destroy(&mas);
+
+ mas.node = MA_ERROR(-ENOMEM);
+ mas_node_count(&mas, i); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ mn = mas_pop_node(&mas); /* get the next node. */
+ ma_free_rcu(mn);
+ mas_destroy(&mas);
+
+ mas.node = MA_ERROR(-ENOMEM);
+ mas_node_count(&mas, i); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ mn = mas_pop_node(&mas); /* get the next node. */
+ ma_free_rcu(mn);
+ mn = mas_pop_node(&mas); /* get the next node. */
+ ma_free_rcu(mn);
+ mn = mas_pop_node(&mas); /* get the next node. */
+ ma_free_rcu(mn);
+ mas_destroy(&mas);
+ }
+
+ mas.node = MA_ERROR(-ENOMEM);
+ mas_node_count(&mas, 5); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ MT_BUG_ON(mt, mas_allocated(&mas) != 5);
+ mas.node = MA_ERROR(-ENOMEM);
+ mas_node_count(&mas, 10); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ mas.node = MAS_START;
+ MT_BUG_ON(mt, mas_allocated(&mas) != 10);
+ mas_destroy(&mas);
+
+ mas.node = MA_ERROR(-ENOMEM);
+ mas_node_count(&mas, MAPLE_ALLOC_SLOTS - 1); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ MT_BUG_ON(mt, mas_allocated(&mas) != MAPLE_ALLOC_SLOTS - 1);
+ mas.node = MA_ERROR(-ENOMEM);
+ mas_node_count(&mas, 10 + MAPLE_ALLOC_SLOTS - 1); /* Request */
+ mas_nomem(&mas, GFP_KERNEL); /* Fill request */
+ mas.node = MAS_START;
+ MT_BUG_ON(mt, mas_allocated(&mas) != 10 + MAPLE_ALLOC_SLOTS - 1);
+ mas_destroy(&mas);
+
+ mtree_unlock(mt);
+}
+
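+/*
+ * Insert indices from max down to 1, verifying that every already-inserted
+ * index loads back while the next lower index stays empty.
+ */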
+static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max,
+ bool verbose)
+{
+ unsigned long i = max, j;
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ mt_zero_nr_tallocated();
+ while (i) {
+ MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
+ for (j = i; j <= max; j++)
+ check_index_load(mt, j);
+
+ check_load(mt, i - 1, NULL);
+ mt_set_in_rcu(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mt_clear_in_rcu(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ i--;
+ }
+ check_load(mt, max + 1, NULL);
+
+ if (verbose) {
+ rcu_barrier();
+ mt_dump(mt);
+ pr_info(" %s test of 0-%lu %luK in %d active (%d total)\n",
+ __func__, max, mt_get_alloc_size()/1024, mt_nr_allocated(),
+ mt_nr_tallocated());
+ }
+}
+
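+/*
+ * Insert indices 0 to max in order, verifying all prior indices after each
+ * insert.
+ */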
+static noinline void check_seq(struct maple_tree *mt, unsigned long max,
+ bool verbose)
+{
+ unsigned long i, j;
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ mt_zero_nr_tallocated();
+ for (i = 0; i <= max; i++) {
+ MT_BUG_ON(mt, mtree_insert_index(mt, i, GFP_KERNEL));
+ for (j = 0; j <= i; j++)
+ check_index_load(mt, j);
+
+ if (i)
+ MT_BUG_ON(mt, !mt_height(mt));
+ check_load(mt, i + 1, NULL);
+ }
+ if (verbose) {
+ rcu_barrier();
+ mt_dump(mt);
+ pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n",
+ max, mt_get_alloc_size()/1024, mt_nr_allocated(),
+ mt_nr_tallocated());
+ }
+}
+
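+/*
+ * Insert halving indices from ~4 billion down to 4096 and verify that the
+ * neighbouring indices remain empty.
+ */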
+static noinline void check_lb_not_empty(struct maple_tree *mt)
+{
+ unsigned long i, j;
+ unsigned long huge = 4000UL * 1000 * 1000;
+
+
+ i = huge;
+ while (i > 4096) {
+ check_insert(mt, i, (void *) i);
+ for (j = huge; j >= i; j /= 2) {
+ check_load(mt, j-1, NULL);
+ check_load(mt, j, (void *) j);
+ check_load(mt, j+1, NULL);
+ }
+ i /= 2;
+ }
+ mtree_destroy(mt);
+}
+
+static noinline void check_lower_bound_split(struct maple_tree *mt)
+{
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ check_lb_not_empty(mt);
+}
+
+static noinline void check_upper_bound_split(struct maple_tree *mt)
+{
+ unsigned long i, j;
+ unsigned long huge = 4000UL * 1000 * 1000;
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+
+ i = 4096;
+ while (i < huge) {
+ check_insert(mt, i, (void *) i);
+ for (j = i; j >= huge; j *= 2) {
+ check_load(mt, j-1, NULL);
+ check_load(mt, j, (void *) j);
+ check_load(mt, j+1, NULL);
+ }
+ i *= 2;
+ }
+ mtree_destroy(mt);
+}
+
+static noinline void check_mid_split(struct maple_tree *mt)
+{
+ unsigned long huge = 8000UL * 1000 * 1000;
+
+ check_insert(mt, huge, (void *) huge);
+ check_insert(mt, 0, xa_mk_value(0));
+ check_lb_not_empty(mt);
+}
+
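+/*
+ * Store ranges at multiples of 10 and walk them backwards with
+ * mas_find_rev().
+ */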
+static noinline void check_rev_find(struct maple_tree *mt)
+{
+ int i, nr_entries = 200;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mas_set(&mas, 1000);
+ val = mas_find_rev(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(100));
+ val = mas_find_rev(&mas, 1000);
+ MT_BUG_ON(mt, val != NULL);
+
+ mas_set(&mas, 999);
+ val = mas_find_rev(&mas, 997);
+ MT_BUG_ON(mt, val != NULL);
+
+ mas_set(&mas, 1000);
+ val = mas_find_rev(&mas, 900);
+ MT_BUG_ON(mt, val != xa_mk_value(100));
+ val = mas_find_rev(&mas, 900);
+ MT_BUG_ON(mt, val != xa_mk_value(99));
+
+ mas_set(&mas, 20);
+ val = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(2));
+ val = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(1));
+ val = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(0));
+ val = mas_find_rev(&mas, 0);
+ MT_BUG_ON(mt, val != NULL);
+}
+
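+/*
+ * Build a tree of widely spaced indices (including a zero entry at 64) and
+ * walk it with mas_find(), mas_for_each(), mt_for_each() and mas_prev().
+ */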
+static noinline void check_find(struct maple_tree *mt)
+{
+ unsigned long val = 0;
+ unsigned long count = 20;
+ unsigned long max;
+ unsigned long last = 0, index = 0;
+ void *entry, *entry2;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ /* Insert 0. */
+ MT_BUG_ON(mt, mtree_insert_index(mt, val++, GFP_KERNEL));
+
+ for (int i = 0; i <= count; i++) {
+ if (val != 64)
+ MT_BUG_ON(mt, mtree_insert_index(mt, val, GFP_KERNEL));
+ else
+ MT_BUG_ON(mt, mtree_insert(mt, val,
+ XA_ZERO_ENTRY, GFP_KERNEL));
+
+ val <<= 2;
+ }
+
+ val = 0;
+ mas_set(&mas, val);
+ mas_lock(&mas);
+ while ((entry = mas_find(&mas, 268435456)) != NULL) {
+ if (val != 64)
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ else
+ MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ }
+ mas_unlock(&mas);
+
+ val = 0;
+ mas_set(&mas, val);
+ mas_lock(&mas);
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (val != 64)
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ else
+ MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ }
+ mas_unlock(&mas);
+
+ /* Test mas_pause */
+ val = 0;
+ mas_set(&mas, val);
+ mas_lock(&mas);
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (val != 64)
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ else
+ MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+
+ mas_pause(&mas);
+ mas_unlock(&mas);
+ mas_lock(&mas);
+ }
+ mas_unlock(&mas);
+
+ val = 0;
+ max = 300; /* A value big enough to include XA_ZERO_ENTRY at 64. */
+ mt_for_each(mt, entry, index, max) {
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ val <<= 2;
+ if (val == 64) /* Skip zero entry. */
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ }
+
+ val = 0;
+ max = 0;
+ index = 0;
+ MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL));
+ mt_for_each(mt, entry, index, ULONG_MAX) {
+ if (val == 4398046511104)
+ MT_BUG_ON(mt, entry !=
+ xa_mk_value(ULONG_MAX & LONG_MAX));
+ else
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ val <<= 2;
+ if (val == 64) /* Skip zero entry. */
+ val <<= 2;
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ max++;
+ MT_BUG_ON(mt, max > 25);
+ }
+ mtree_erase_index(mt, ULONG_MAX);
+
+ mas_reset(&mas);
+ index = 17;
+ entry = mt_find(mt, &index, 512);
+ MT_BUG_ON(mt, xa_mk_value(256) != entry);
+
+ mas_reset(&mas);
+ index = 17;
+ entry = mt_find(mt, &index, 20);
+ MT_BUG_ON(mt, entry != NULL);
+
+
+ /* Range check.. */
+ /* Insert ULONG_MAX */
+ MT_BUG_ON(mt, mtree_insert_index(mt, ULONG_MAX, GFP_KERNEL));
+
+ val = 0;
+ mas_set(&mas, 0);
+ mas_lock(&mas);
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (val == 64)
+ MT_BUG_ON(mt, entry != XA_ZERO_ENTRY);
+ else if (val == 4398046511104)
+ MT_BUG_ON(mt, entry != xa_mk_value(ULONG_MAX & LONG_MAX));
+ else
+ MT_BUG_ON(mt, xa_mk_value(val) != entry);
+ val <<= 2;
+
+ /* For zero check. */
+ if (!val)
+ val = 1;
+ mas_pause(&mas);
+ mas_unlock(&mas);
+ mas_lock(&mas);
+ }
+ mas_unlock(&mas);
+
+ mas_set(&mas, 1048576);
+ mas_lock(&mas);
+ entry = mas_find(&mas, 1048576);
+ mas_unlock(&mas);
+ MT_BUG_ON(mas.tree, entry == NULL);
+
+ /*
+ * Find last value.
+ * 1. get the expected value, leveraging the existence of an end entry
+ * 2. delete end entry
+ * 3. find the last value by searching for ULONG_MAX and then using
+ * prev
+ */
+ /* First, get the expected result. */
+ mas_lock(&mas);
+ mas_reset(&mas);
+ mas.index = ULONG_MAX; /* start at max.. */
+ entry = mas_find(&mas, ULONG_MAX);
+ entry = mas_prev(&mas, 0);
+ index = mas.index;
+ last = mas.last;
+
+ /* Erase the last entry. */
+ mas_reset(&mas);
+ mas.index = ULONG_MAX;
+ mas.last = ULONG_MAX;
+ mas_erase(&mas);
+
+ /* Get the previous value from MAS_START */
+ mas_reset(&mas);
+ entry2 = mas_prev(&mas, 0);
+
+ /* Check results. */
+ MT_BUG_ON(mt, entry != entry2);
+ MT_BUG_ON(mt, index != mas.index);
+ MT_BUG_ON(mt, last != mas.last);
+
+
+ mas.node = MAS_NONE;
+ mas.index = ULONG_MAX;
+ mas.last = ULONG_MAX;
+ entry2 = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != entry2);
+
+ mas_set(&mas, 0);
+ MT_BUG_ON(mt, mas_prev(&mas, 0) != NULL);
+
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+}
+
+static noinline void check_find_2(struct maple_tree *mt)
+{
+ unsigned long i, j;
+ void *entry;
+
+ MA_STATE(mas, mt, 0, 0);
+ rcu_read_lock();
+ mas_for_each(&mas, entry, ULONG_MAX)
+ MT_BUG_ON(mt, true);
+ rcu_read_unlock();
+
+ for (i = 0; i < 256; i++) {
+ mtree_insert_index(mt, i, GFP_KERNEL);
+ j = 0;
+ mas_set(&mas, 0);
+ rcu_read_lock();
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j++;
+ }
+ rcu_read_unlock();
+ MT_BUG_ON(mt, j != i + 1);
+ }
+
+ for (i = 0; i < 256; i++) {
+ mtree_erase_index(mt, i);
+ j = i + 1;
+ mas_set(&mas, 0);
+ rcu_read_lock();
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (xa_is_zero(entry))
+ continue;
+
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j++;
+ }
+ rcu_read_unlock();
+ MT_BUG_ON(mt, j != 256);
+ }
+
+ /*MT_BUG_ON(mt, !mtree_empty(mt)); */
+}
+
+#define erase_ptr(i) entry[i%2]
+#define erase_check_load(mt, i) check_load(mt, set[i], entry[i%2])
+#define erase_check_insert(mt, i) check_insert(mt, set[i], entry[i%2])
+#define erase_check_erase(mt, i) check_erase(mt, set[i], entry[i%2])
+
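+/*
+ * Interleave inserts, erases and loads over a fixed index set with the tree
+ * in RCU mode, checking node reuse and coalescing behaviour.
+ */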
+static noinline void check_erase_testset(struct maple_tree *mt)
+{
+ unsigned long set[] = { 5015, 5014, 5017, 25, 1000,
+ 1001, 1002, 1003, 1005, 0,
+ 6003, 6002, 6008, 6012, 6015,
+ 7003, 7002, 7008, 7012, 7015,
+ 8003, 8002, 8008, 8012, 8015,
+ 9003, 9002, 9008, 9012, 9015,
+ 10003, 10002, 10008, 10012, 10015,
+ 11003, 11002, 11008, 11012, 11015,
+ 12003, 12002, 12008, 12012, 12015,
+ 13003, 13002, 13008, 13012, 13015,
+ 14003, 14002, 14008, 14012, 14015,
+ 15003, 15002, 15008, 15012, 15015,
+ };
+
+
+ void *ptr = &set;
+ void *entry[2] = { ptr, mt };
+ void *root_node;
+
+
+ rcu_register_thread();
+ mt_set_in_rcu(mt);
+ for (int i = 0; i < 4; i++)
+ erase_check_insert(mt, i);
+ for (int i = 0; i < 4; i++)
+ erase_check_load(mt, i);
+
+ mt_set_non_kernel(2);
+ erase_check_erase(mt, 1);
+ erase_check_load(mt, 0);
+ check_load(mt, set[1], NULL);
+ for (int i = 2; i < 4; i++)
+ erase_check_load(mt, i);
+
+
+ erase_check_erase(mt, 2);
+ erase_check_load(mt, 0);
+ check_load(mt, set[1], NULL);
+ check_load(mt, set[2], NULL);
+
+ erase_check_insert(mt, 1);
+ erase_check_insert(mt, 2);
+
+ for (int i = 0; i < 4; i++)
+ erase_check_load(mt, i);
+
+ /* Check erase and load without an allocation. */
+ erase_check_load(mt, 3);
+ erase_check_erase(mt, 1);
+ erase_check_load(mt, 0);
+ check_load(mt, set[1], NULL);
+ for (int i = 2; i < 4; i++)
+ erase_check_load(mt, i);
+
+ /*
+ * Set the newly erased node. This will produce a different allocated
+ * node to avoid busy slots.
+ */
+ root_node = mt->ma_root;
+ erase_check_insert(mt, 1);
+
+ erase_check_load(mt, 0);
+ check_load(mt, 5016, NULL);
+ erase_check_load(mt, 1);
+ check_load(mt, 5013, NULL);
+ erase_check_load(mt, 2);
+ check_load(mt, 5018, NULL);
+ erase_check_load(mt, 3);
+
+ erase_check_erase(mt, 2); /* erase 5017 to check append */
+ erase_check_load(mt, 0);
+ check_load(mt, 5016, NULL);
+ erase_check_load(mt, 1);
+ check_load(mt, 5013, NULL);
+ check_load(mt, set[2], NULL);
+ check_load(mt, 5018, NULL);
+
+ erase_check_load(mt, 3);
+
+ root_node = mt->ma_root;
+ erase_check_insert(mt, 2);
+
+ erase_check_load(mt, 0);
+ check_load(mt, 5016, NULL);
+ erase_check_load(mt, 1);
+ check_load(mt, 5013, NULL);
+ erase_check_load(mt, 2);
+ check_load(mt, 5018, NULL);
+ erase_check_load(mt, 3);
+
+ mt_set_non_kernel(1);
+ erase_check_erase(mt, 2); /* erase 5017 to check append */
+ erase_check_load(mt, 0);
+ check_load(mt, 5016, NULL);
+ check_load(mt, set[2], NULL);
+ erase_check_erase(mt, 0); /* erase 5015 to check append */
+ check_load(mt, set[0], NULL);
+ check_load(mt, 5016, NULL);
+ erase_check_insert(mt, 4); /* 1000 < Should not split. */
+ check_load(mt, set[0], NULL);
+ check_load(mt, 5016, NULL);
+ erase_check_load(mt, 1);
+ check_load(mt, 5013, NULL);
+ check_load(mt, set[2], NULL);
+ check_load(mt, 5018, NULL);
+ erase_check_load(mt, 4);
+ check_load(mt, 999, NULL);
+ check_load(mt, 1001, NULL);
+ erase_check_load(mt, 4);
+ if (mt_in_rcu(mt))
+ MT_BUG_ON(mt, root_node == mt->ma_root);
+ else
+ MT_BUG_ON(mt, root_node != mt->ma_root);
+
+ /* Should not have split. */
+ MT_BUG_ON(mt, !mte_is_leaf(mt->ma_root));
+
+
+ /* Coalesce testing */
+ erase_check_insert(mt, 0);
+ erase_check_insert(mt, 2);
+
+ for (int i = 5; i < 25; i++) {
+ erase_check_insert(mt, i);
+ for (int j = i; j >= 0; j--)
+ erase_check_load(mt, j);
+ }
+
+ erase_check_erase(mt, 14); /*6015 */
+ for (int i = 0; i < 25; i++) {
+ if (i == 14)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+ erase_check_erase(mt, 16); /*7002 */
+ for (int i = 0; i < 25; i++) {
+ if (i == 16 || i == 14)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+
+ mt_set_non_kernel(1);
+ erase_check_erase(mt, 13); /*6012 */
+ for (int i = 0; i < 25; i++) {
+ if (i == 16 || i == 14 || i == 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ erase_check_erase(mt, 15); /*7003 */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 16 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ mt_set_non_kernel(2);
+ erase_check_erase(mt, 17); /*7008 *should* cause coalesce. */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 17 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ erase_check_erase(mt, 18); /*7012 */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 18 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ mt_set_non_kernel(2);
+ erase_check_erase(mt, 19); /*7015 */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 19 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ erase_check_erase(mt, 20); /*8003 */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 20 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ erase_check_erase(mt, 21); /*8002 */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 21 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ mt_set_non_kernel(2);
+ erase_check_erase(mt, 22); /*8008 */
+ for (int i = 0; i < 25; i++) {
+ if (i <= 22 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+ for (int i = 23; i < 25; i++)
+ erase_check_erase(mt, i);
+
+ for (int i = 0; i < 25; i++) {
+ if (i <= 25 && i >= 13)
+ check_load(mt, set[i], NULL);
+ else
+ erase_check_load(mt, i);
+ }
+
+ /* Shrinking tree test. */
+
+ for (int i = 13; i < ARRAY_SIZE(set); i++)
+ erase_check_insert(mt, i);
+
+ mt_set_non_kernel(99);
+ for (int i = 18; i < ARRAY_SIZE(set); i++) {
+ erase_check_erase(mt, i);
+ for (int j = 0; j < ARRAY_SIZE(set); j++) {
+ if (j < 18 || j > i)
+ erase_check_load(mt, j);
+ else
+ check_load(mt, set[j], NULL);
+ }
+ }
+ mt_set_non_kernel(35);
+ for (int i = 0; i < 18; i++) {
+ erase_check_erase(mt, i);
+ for (int j = 0; j < ARRAY_SIZE(set); j++) {
+ if (j < 18 && j > i)
+ erase_check_load(mt, j);
+ else
+ check_load(mt, set[j], NULL);
+ }
+ }
+ erase_check_insert(mt, 8);
+ erase_check_insert(mt, 9);
+ erase_check_erase(mt, 8);
+ rcu_unregister_thread();
+}
+
+#define erase_check_store_range(mt, a, i, ptr) mtree_test_store_range(mt, \
+ a[(i)], a[(i + 1)], ptr)
+#define STORE 1
+#define SNULL 2
+#define ERASE 3
+#define ec_type_str(x) \
+ (((x) == STORE) ? \
+ "STORE" : \
+ (((x) == SNULL) ? \
+ "SNULL" : "ERASE") \
+ )
+#define check_erase2_debug 0
+void *mas_next(struct ma_state *mas, unsigned long max);
+
+/* Calculate the overwritten entries. */
+int mas_ce2_over_count(struct ma_state *mas_start, struct ma_state *mas_end,
+ void *s_entry, unsigned long s_min,
+ void *e_entry, unsigned long e_max,
+ unsigned long *set, int i, bool null_entry)
+{
+ int count = 0, span = 0;
+ unsigned long retry = 0;
+ void *entry;
+ struct ma_state tmp;
+
+
+ /* count slots */
+ memcpy(&tmp, mas_start, sizeof(tmp));
+ entry = mas_next(&tmp, mas_end->last);
+ while (entry) {
+ BUG_ON(retry > 50); /* stop infinite retry on testing. */
+ if (xa_is_zero(s_entry)) {
+ retry++;
+ continue;
+ }
+ count++;
+ span++;
+ entry = mas_next(&tmp, mas_end->last);
+ }
+
+ if (null_entry) {
+ /* Check splitting end. */
+ if (e_entry && (e_max > mas_end->last))
+ count--;
+
+ /* check overwrite of entire start */
+ if (s_entry && (s_min == mas_start->index))
+ count++;
+ } else { /* !null_entry (store) */
+ bool esplit = e_max > mas_end->last;
+ bool ssplit = s_min != mas_start->index;
+
+ if (s_entry && e_entry) {
+ if (esplit && ssplit)
+ count--;
+ else if (ssplit)
+ count--;
+ else if (esplit) {
+ if (span)
+ count--;
+ }
+ } else if (s_entry && !e_entry) {
+ if (ssplit)
+ count--;
+ } else if (!s_entry && e_entry) {
+ if (esplit)
+ count--;
+ count--;
+ } else {
+ count--;
+ }
+ }
+ return count;
+}
+
+/*
+ * mas_node_walk() - Walk a maple node to the offset of the index.
+ * @mas: The maple state
+ * @node: The maple node
+ * @type: The maple node type
+ * @range_min: Pointer to store the minimum range of the offset
+ * @range_max: Pointer to store the maximum range of the offset
+ *
+ * The offset will be stored in the maple state.
+ *
+ */
+static inline void mas_node_walk(struct ma_state *mas, struct maple_node *node,
+ enum maple_type type, unsigned long *range_min,
+ unsigned long *range_max)
+
+{
+ unsigned long *pivots;
+ unsigned char count;
+ unsigned long prev, max;
+ unsigned char offset;
+ unsigned long index;
+
+ if (unlikely(ma_is_dense(type))) {
+ (*range_max) = (*range_min) = mas->index;
+ if (unlikely(ma_dead_node(node)))
+ return;
+
+ mas->offset = mas->index = mas->min;
+ return;
+ }
+
+ pivots = ma_pivots(node, type);
+ max = pivots[0];
+ if (unlikely(ma_dead_node(node)))
+ return;
+
+ offset = 0;
+ prev = mas->min;
+ index = mas->index;
+ if (unlikely(index <= max))
+ goto offset_zero;
+
+ count = mt_pivots[type];
+ while (++offset < count) {
+ prev = max;
+ max = pivots[offset];
+ if (unlikely(ma_dead_node(node)))
+ return;
+
+ if (index <= max)
+ goto offset_found;
+ else if (unlikely(!max))
+ goto mas_max;
+ }
+
+ prev = max;
+mas_max:
+ max = mas->max;
+offset_found:
+ prev++;
+offset_zero:
+ mas->offset = offset;
+ if (ma_is_leaf(type)) {
+ *range_max = max;
+ *range_min = prev;
+ } else {
+ mas->max = max;
+ mas->min = prev;
+ }
+}
+
+/*
+ * mas_descend_walk() - Locate a value and set mas->node and the slot
+ * accordingly. range_min and range_max are set to the range for which the
+ * entry is valid.
+ * @mas: The maple state
+ * @range_min: A pointer to store the minimum of the range
+ * @range_max: A pointer to store the maximum of the range
+ *
+ * Check mas->node is still valid on return of any value.
+ *
+ * Return: true if pointing to a valid node and offset. False otherwise.
+ */
+static inline bool mas_descend_walk(struct ma_state *mas,
+ unsigned long *range_min, unsigned long *range_max)
+{
+ struct maple_enode *next;
+ struct maple_node *node;
+ enum maple_type type;
+
+ next = mas->node;
+ while (true) {
+ node = mte_to_node(next);
+ type = mte_node_type(next);
+ mas_node_walk(mas, node, type, range_min, range_max);
+ next = mas_slot(mas, ma_slots(node, type), mas->offset);
+ if (unlikely(ma_dead_node(node)))
+ return false;
+
+ if (unlikely(ma_is_leaf(type)))
+ return true;
+
+ /* Descend. */
+ mas->node = next;
+ }
+ return false;
+}
+
+/*
+ * mas_tree_walk() - Walk to @mas->index and set the range values.
+ * @mas: The maple state.
+ * @range_min: The minimum of the range to be set.
+ * @range_max: The maximum of the range to be set.
+ *
+ * Ranges are only valid if there is a valid entry at @mas->index.
+ *
+ * Return: True if a value exists, false otherwise.
+ */
+static inline bool mas_tree_walk(struct ma_state *mas, unsigned long *range_min,
+ unsigned long *range_max)
+{
+ bool ret;
+
+retry:
+ ret = false;
+ mas_start(mas);
+ if (mas_is_none(mas))
+ goto not_found;
+
+ if (mas_is_ptr(mas)) {
+ *range_min = *range_max = 0;
+ if (!mas->index)
+ return true;
+
+ goto not_found;
+ }
+
+ ret = mas_descend_walk(mas, range_min, range_max);
+ if (unlikely(mte_dead_node(mas->node))) {
+ mas->node = MAS_START;
+ goto retry;
+ }
+
+ return ret;
+
+not_found:
+ mas->offset = MAPLE_NODE_SLOTS;
+ return false;
+}
+
+static inline void *mas_range_load(struct ma_state *mas,
+ unsigned long *range_min, unsigned long *range_max)
+
+{
+ void *entry = NULL;
+ unsigned long index = mas->index;
+
+ if (mas_is_none(mas) || mas_is_paused(mas))
+ mas->node = MAS_START;
+retry:
+ if (mas_tree_walk(mas, range_min, range_max))
+ if (unlikely(mas->node == MAS_ROOT))
+ return mas_root(mas);
+
+ if (likely(mas->offset != MAPLE_NODE_SLOTS))
+ entry = mas_get_slot(mas, mas->offset);
+
+ if (mas_dead_node(mas, index))
+ goto retry;
+
+ return entry;
+}
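+
+/*
+ * Replay a log of STORE/SNULL/ERASE operations against the tree, tracking the
+ * expected entry count and validating the tree after each step.
+ */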
+static noinline void check_erase2_testset(struct maple_tree *mt,
+ unsigned long *set, unsigned long size)
+{
+ int entry_count = 0;
+ int check = 0;
+ void *foo;
+ unsigned long addr = 0;
+ void *s_entry = NULL, *e_entry = NULL;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ for (int i = 0; i < size; i += 3) {
+ unsigned long s_min, s_max;
+ unsigned long e_min, e_max;
+ void *value = NULL;
+
+ MA_STATE(mas_start, mt, set[i+1], set[i+1]);
+ MA_STATE(mas_end, mt, set[i+2], set[i+2]);
+ mt_set_non_kernel(127);
+#if check_erase2_debug
+ pr_err("%s: %d %s %lu - %lu\n", __func__, i,
+ ec_type_str(set[i]),
+ set[i+1], set[i+2]);
+#endif
+ s_entry = mas_range_load(&mas_start, &s_min, &s_max);
+ e_entry = mas_range_load(&mas_end, &e_min, &e_max);
+
+ switch (set[i]) {
+ case SNULL:
+ if ((s_min == set[i+1]) && (s_max == set[i+2])) {
+ if (s_entry)
+ entry_count--;
+ } else if ((s_min != set[i+1]) && (s_max != set[i+2])) {
+ entry_count++;
+ } else if ((mas_start.node != mas_end.node) ||
+ (mas_start.offset != mas_end.offset)) {
+ entry_count -=
+ mas_ce2_over_count(&mas_start, &mas_end,
+ s_entry, s_min,
+ e_entry, e_max, set, i,
+ true);
+ }
+
+
+ erase_check_store_range(mt, set, i + 1, value);
+ break;
+ case STORE:
+ value = xa_mk_value(set[i + 1]);
+ if (mas_start.offset > mt_slot_count(mas_start.node)) {
+ entry_count++; /* appending an entry. */
+ } else if ((s_min == e_min) && (s_max == e_max)) {
+ if (!entry_count)
+ entry_count++;
+
+ else if (s_entry) {
+ if (e_max > mas_end.last)
+ entry_count++;
+
+ if (s_min < mas_start.index)
+ entry_count++;
+
+ } else {
+ entry_count++;
+ }
+ } else {
+ entry_count -=
+ mas_ce2_over_count(&mas_start, &mas_end,
+ s_entry, s_min,
+ e_entry, e_max, set, i,
+ false);
+ }
+
+ erase_check_store_range(mt, set, i + 1, value);
+ break;
+ case ERASE:
+ if (!s_entry)
+ break;
+ check_erase(mt, set[i+1], xa_mk_value(set[i+1]));
+ entry_count--;
+ break;
+ }
+ mt_validate(mt);
+ if (entry_count)
+ MT_BUG_ON(mt, !mt_height(mt));
+#if check_erase2_debug > 1
+ mt_dump(mt);
+#endif
+#if check_erase2_debug
+ pr_err("Done\n");
+#endif
+
+ check = 0;
+ addr = 0;
+ mt_for_each(mt, foo, addr, ULONG_MAX) {
+ check++;
+#if check_erase2_debug > 2
+ pr_err("mt: %lu -> %p (%d)\n", addr+1, foo, check);
+#endif
+ if (check > entry_count)
+ break;
+ }
+
+#if check_erase2_debug > 2
+ pr_err("mt_for_each %d and count %d\n", check, entry_count);
+#endif
+
+ MT_BUG_ON(mt, check != entry_count);
+
+ check = 0;
+ addr = 0;
+ mas_reset(&mas);
+ mas.index = 0;
+ rcu_read_lock();
+ mas_for_each(&mas, foo, ULONG_MAX) {
+ if (xa_is_zero(foo)) {
+ if (addr == mas.index) {
+ mt_dump(mas.tree);
+ pr_err("retry failed %lu - %lu\n",
+ mas.index, mas.last);
+ MT_BUG_ON(mt, 1);
+ }
+ addr = mas.index;
+ continue;
+ }
+#if check_erase2_debug > 2
+ pr_err("mas: %lu -> %p\n", mas.index, foo);
+#endif
+ check++;
+ if (check > entry_count)
+ break;
+ }
+ rcu_read_unlock();
+#if check_erase2_debug > 2
+ pr_err("mas_for_each %d and count %d\n", check, entry_count);
+ mt_validate(mt);
+#endif
+
+ MT_BUG_ON(mt, check != entry_count);
+
+ MT_BUG_ON(mt, mtree_load(mas.tree, 0) != NULL);
+ }
+}
+
+/* These tests were pulled from kvm tests. */
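+/*
+ * Each set[] below encodes one recorded sequence as consecutive
+ * {operation, start, end} triplets, the format check_erase2_testset()
+ * consumes three elements at a time.
+ */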
+static noinline void check_erase2_sets(struct maple_tree *mt)
+{
+ void *entry;
+ unsigned long start = 0;
+ unsigned long set[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140721266458624, 140737488351231,
+ERASE, 140721266458624, 140737488351231,
+STORE, 140721266458624, 140721266462719,
+STORE, 94735788949504, 94735789121535,
+ERASE, 94735788949504, 94735789121535,
+STORE, 94735788949504, 94735788965887,
+STORE, 94735788965888, 94735789121535,
+ERASE, 94735788965888, 94735789121535,
+STORE, 94735788965888, 94735789068287,
+STORE, 94735789068288, 94735789109247,
+STORE, 94735789109248, 94735789121535,
+STORE, 140253902692352, 140253902864383,
+ERASE, 140253902692352, 140253902864383,
+STORE, 140253902692352, 140253902696447,
+STORE, 140253902696448, 140253902864383,
+ };
+ unsigned long set2[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140735933583360, 140737488351231,
+ERASE, 140735933583360, 140737488351231,
+STORE, 140735933583360, 140735933587455,
+STORE, 94811003260928, 94811003432959,
+ERASE, 94811003260928, 94811003432959,
+STORE, 94811003260928, 94811003277311,
+STORE, 94811003277312, 94811003432959,
+ERASE, 94811003277312, 94811003432959,
+STORE, 94811003277312, 94811003379711,
+STORE, 94811003379712, 94811003420671,
+STORE, 94811003420672, 94811003432959,
+STORE, 140277094653952, 140277094825983,
+ERASE, 140277094653952, 140277094825983,
+STORE, 140277094653952, 140277094658047,
+STORE, 140277094658048, 140277094825983,
+ERASE, 140277094658048, 140277094825983,
+STORE, 140277094658048, 140277094780927,
+STORE, 140277094780928, 140277094813695,
+STORE, 140277094813696, 140277094821887,
+STORE, 140277094821888, 140277094825983,
+STORE, 140735933906944, 140735933911039,
+ };
+ unsigned long set3[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140735790264320, 140737488351231,
+ERASE, 140735790264320, 140737488351231,
+STORE, 140735790264320, 140735790268415,
+STORE, 94016597282816, 94016597454847,
+ERASE, 94016597282816, 94016597454847,
+STORE, 94016597282816, 94016597299199,
+STORE, 94016597299200, 94016597454847,
+ERASE, 94016597299200, 94016597454847,
+STORE, 94016597299200, 94016597401599,
+STORE, 94016597401600, 94016597442559,
+STORE, 94016597442560, 94016597454847,
+STORE, 140496959283200, 140496959455231,
+ERASE, 140496959283200, 140496959455231,
+STORE, 140496959283200, 140496959287295,
+STORE, 140496959287296, 140496959455231,
+ERASE, 140496959287296, 140496959455231,
+STORE, 140496959287296, 140496959410175,
+STORE, 140496959410176, 140496959442943,
+STORE, 140496959442944, 140496959451135,
+STORE, 140496959451136, 140496959455231,
+STORE, 140735791718400, 140735791722495,
+STORE, 140735791706112, 140735791718399,
+STORE, 47135835713536, 47135835721727,
+STORE, 47135835721728, 47135835729919,
+STORE, 47135835729920, 47135835893759,
+ERASE, 47135835729920, 47135835893759,
+STORE, 47135835729920, 47135835742207,
+STORE, 47135835742208, 47135835893759,
+STORE, 47135835840512, 47135835893759,
+STORE, 47135835742208, 47135835840511,
+ERASE, 47135835742208, 47135835840511,
+STORE, 47135835742208, 47135835840511,
+STORE, 47135835885568, 47135835893759,
+STORE, 47135835840512, 47135835885567,
+ERASE, 47135835840512, 47135835885567,
+STORE, 47135835840512, 47135835893759,
+ERASE, 47135835840512, 47135835893759,
+STORE, 47135835840512, 47135835885567,
+STORE, 47135835885568, 47135835893759,
+ };
+
+ unsigned long set4[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140728251703296, 140737488351231,
+ERASE, 140728251703296, 140737488351231,
+STORE, 140728251703296, 140728251707391,
+STORE, 94668429205504, 94668429377535,
+ERASE, 94668429205504, 94668429377535,
+STORE, 94668429205504, 94668429221887,
+STORE, 94668429221888, 94668429377535,
+ERASE, 94668429221888, 94668429377535,
+STORE, 94668429221888, 94668429324287,
+STORE, 94668429324288, 94668429365247,
+STORE, 94668429365248, 94668429377535,
+STORE, 47646523273216, 47646523445247,
+ERASE, 47646523273216, 47646523445247,
+STORE, 47646523273216, 47646523277311,
+STORE, 47646523277312, 47646523445247,
+ERASE, 47646523277312, 47646523445247,
+STORE, 47646523277312, 47646523400191,
+ };
+
+ unsigned long set5[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140726874062848, 140737488351231,
+ERASE, 140726874062848, 140737488351231,
+STORE, 140726874062848, 140726874066943,
+STORE, 94248892870656, 94248893042687,
+ERASE, 94248892870656, 94248893042687,
+STORE, 94248892870656, 94248892887039,
+STORE, 94248892887040, 94248893042687,
+ERASE, 94248892887040, 94248893042687,
+STORE, 94248892887040, 94248892989439,
+STORE, 94248892989440, 94248893030399,
+STORE, 94248893030400, 94248893042687,
+STORE, 47884786266112, 47884786438143,
+ERASE, 47884786266112, 47884786438143,
+STORE, 47884786266112, 47884786270207,
+STORE, 47884786270208, 47884786438143,
+ERASE, 47884786270208, 47884786438143,
+STORE, 47884786270208, 47884786393087,
+STORE, 47884786393088, 47884786425855,
+STORE, 47884786425856, 47884786434047,
+STORE, 47884786434048, 47884786438143,
+STORE, 140726874513408, 140726874517503,
+STORE, 140726874501120, 140726874513407,
+STORE, 47884786438144, 47884786446335,
+STORE, 47884786446336, 47884786454527,
+STORE, 47884786454528, 47884786618367,
+ERASE, 47884786454528, 47884786618367,
+STORE, 47884786454528, 47884786466815,
+STORE, 47884786466816, 47884786618367,
+STORE, 47884786565120, 47884786618367,
+STORE, 47884786466816, 47884786565119,
+ERASE, 47884786466816, 47884786565119,
+STORE, 47884786466816, 47884786565119,
+STORE, 47884786610176, 47884786618367,
+STORE, 47884786565120, 47884786610175,
+ERASE, 47884786565120, 47884786610175,
+STORE, 47884786565120, 47884786618367,
+ERASE, 47884786565120, 47884786618367,
+STORE, 47884786565120, 47884786610175,
+STORE, 47884786610176, 47884786618367,
+ERASE, 47884786610176, 47884786618367,
+STORE, 47884786610176, 47884786618367,
+STORE, 47884786618368, 47884789669887,
+STORE, 47884787163136, 47884789669887,
+STORE, 47884786618368, 47884787163135,
+ERASE, 47884787163136, 47884789669887,
+STORE, 47884787163136, 47884789448703,
+STORE, 47884789448704, 47884789669887,
+STORE, 47884788858880, 47884789448703,
+STORE, 47884787163136, 47884788858879,
+ERASE, 47884787163136, 47884788858879,
+STORE, 47884787163136, 47884788858879,
+STORE, 47884789444608, 47884789448703,
+STORE, 47884788858880, 47884789444607,
+ERASE, 47884788858880, 47884789444607,
+STORE, 47884788858880, 47884789444607,
+STORE, 47884789653504, 47884789669887,
+STORE, 47884789448704, 47884789653503,
+ERASE, 47884789448704, 47884789653503,
+STORE, 47884789448704, 47884789653503,
+ERASE, 47884789653504, 47884789669887,
+STORE, 47884789653504, 47884789669887,
+STORE, 47884789669888, 47884791508991,
+STORE, 47884789809152, 47884791508991,
+STORE, 47884789669888, 47884789809151,
+ERASE, 47884789809152, 47884791508991,
+STORE, 47884789809152, 47884791468031,
+STORE, 47884791468032, 47884791508991,
+STORE, 47884791152640, 47884791468031,
+STORE, 47884789809152, 47884791152639,
+ERASE, 47884789809152, 47884791152639,
+STORE, 47884789809152, 47884791152639,
+STORE, 47884791463936, 47884791468031,
+STORE, 47884791152640, 47884791463935,
+ERASE, 47884791152640, 47884791463935,
+STORE, 47884791152640, 47884791463935,
+STORE, 47884791492608, 47884791508991,
+STORE, 47884791468032, 47884791492607,
+ERASE, 47884791468032, 47884791492607,
+STORE, 47884791468032, 47884791492607,
+ERASE, 47884791492608, 47884791508991,
+STORE, 47884791492608, 47884791508991,
+STORE, 47884791508992, 47884791644159,
+ERASE, 47884791508992, 47884791644159,
+STORE, 47884791508992, 47884791533567,
+STORE, 47884791533568, 47884791644159,
+STORE, 47884791595008, 47884791644159,
+STORE, 47884791533568, 47884791595007,
+ERASE, 47884791533568, 47884791595007,
+STORE, 47884791533568, 47884791595007,
+STORE, 47884791619584, 47884791644159,
+STORE, 47884791595008, 47884791619583,
+ERASE, 47884791595008, 47884791619583,
+STORE, 47884791595008, 47884791644159,
+ERASE, 47884791595008, 47884791644159,
+STORE, 47884791595008, 47884791619583,
+STORE, 47884791619584, 47884791644159,
+STORE, 47884791627776, 47884791644159,
+STORE, 47884791619584, 47884791627775,
+ERASE, 47884791619584, 47884791627775,
+STORE, 47884791619584, 47884791627775,
+ERASE, 47884791627776, 47884791644159,
+STORE, 47884791627776, 47884791644159,
+STORE, 47884791644160, 47884791664639,
+ERASE, 47884791644160, 47884791664639,
+STORE, 47884791644160, 47884791648255,
+STORE, 47884791648256, 47884791664639,
+STORE, 47884791652352, 47884791664639,
+STORE, 47884791648256, 47884791652351,
+ERASE, 47884791648256, 47884791652351,
+STORE, 47884791648256, 47884791652351,
+STORE, 47884791656448, 47884791664639,
+STORE, 47884791652352, 47884791656447,
+ERASE, 47884791652352, 47884791656447,
+STORE, 47884791652352, 47884791664639,
+ERASE, 47884791652352, 47884791664639,
+STORE, 47884791652352, 47884791656447,
+STORE, 47884791656448, 47884791664639,
+ERASE, 47884791656448, 47884791664639,
+STORE, 47884791656448, 47884791664639,
+STORE, 47884791664640, 47884791672831,
+ERASE, 47884791468032, 47884791492607,
+STORE, 47884791468032, 47884791484415,
+STORE, 47884791484416, 47884791492607,
+ERASE, 47884791656448, 47884791664639,
+STORE, 47884791656448, 47884791660543,
+STORE, 47884791660544, 47884791664639,
+ERASE, 47884791619584, 47884791627775,
+STORE, 47884791619584, 47884791623679,
+STORE, 47884791623680, 47884791627775,
+ };
+
+ unsigned long set6[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140722999021568, 140737488351231,
+ERASE, 140722999021568, 140737488351231,
+STORE, 140722999021568, 140722999025663,
+STORE, 94901500268544, 94901500440575,
+ERASE, 94901500268544, 94901500440575,
+STORE, 94901500268544, 94901500284927,
+STORE, 94901500284928, 94901500440575,
+ERASE, 94901500284928, 94901500440575,
+STORE, 94901500284928, 94901500387327,
+STORE, 94901500387328, 94901500428287,
+STORE, 94901500428288, 94901500440575,
+STORE, 47430426660864, 47430426832895,
+ERASE, 47430426660864, 47430426832895,
+STORE, 47430426660864, 47430426664959,
+STORE, 47430426664960, 47430426832895,
+ERASE, 47430426664960, 47430426832895,
+STORE, 47430426664960, 47430426787839,
+STORE, 47430426787840, 47430426820607,
+STORE, 47430426820608, 47430426828799,
+STORE, 47430426828800, 47430426832895,
+STORE, 140722999115776, 140722999119871,
+STORE, 140722999103488, 140722999115775,
+STORE, 47430426832896, 47430426841087,
+STORE, 47430426841088, 47430426849279,
+STORE, 47430426849280, 47430427013119,
+ERASE, 47430426849280, 47430427013119,
+STORE, 47430426849280, 47430426861567,
+STORE, 47430426861568, 47430427013119,
+STORE, 47430426959872, 47430427013119,
+STORE, 47430426861568, 47430426959871,
+ERASE, 47430426861568, 47430426959871,
+STORE, 47430426861568, 47430426959871,
+STORE, 47430427004928, 47430427013119,
+STORE, 47430426959872, 47430427004927,
+ERASE, 47430426959872, 47430427004927,
+STORE, 47430426959872, 47430427013119,
+ERASE, 47430426959872, 47430427013119,
+STORE, 47430426959872, 47430427004927,
+STORE, 47430427004928, 47430427013119,
+ERASE, 47430427004928, 47430427013119,
+STORE, 47430427004928, 47430427013119,
+STORE, 47430427013120, 47430430064639,
+STORE, 47430427557888, 47430430064639,
+STORE, 47430427013120, 47430427557887,
+ERASE, 47430427557888, 47430430064639,
+STORE, 47430427557888, 47430429843455,
+STORE, 47430429843456, 47430430064639,
+STORE, 47430429253632, 47430429843455,
+STORE, 47430427557888, 47430429253631,
+ERASE, 47430427557888, 47430429253631,
+STORE, 47430427557888, 47430429253631,
+STORE, 47430429839360, 47430429843455,
+STORE, 47430429253632, 47430429839359,
+ERASE, 47430429253632, 47430429839359,
+STORE, 47430429253632, 47430429839359,
+STORE, 47430430048256, 47430430064639,
+STORE, 47430429843456, 47430430048255,
+ERASE, 47430429843456, 47430430048255,
+STORE, 47430429843456, 47430430048255,
+ERASE, 47430430048256, 47430430064639,
+STORE, 47430430048256, 47430430064639,
+STORE, 47430430064640, 47430431903743,
+STORE, 47430430203904, 47430431903743,
+STORE, 47430430064640, 47430430203903,
+ERASE, 47430430203904, 47430431903743,
+STORE, 47430430203904, 47430431862783,
+STORE, 47430431862784, 47430431903743,
+STORE, 47430431547392, 47430431862783,
+STORE, 47430430203904, 47430431547391,
+ERASE, 47430430203904, 47430431547391,
+STORE, 47430430203904, 47430431547391,
+STORE, 47430431858688, 47430431862783,
+STORE, 47430431547392, 47430431858687,
+ERASE, 47430431547392, 47430431858687,
+STORE, 47430431547392, 47430431858687,
+STORE, 47430431887360, 47430431903743,
+STORE, 47430431862784, 47430431887359,
+ERASE, 47430431862784, 47430431887359,
+STORE, 47430431862784, 47430431887359,
+ERASE, 47430431887360, 47430431903743,
+STORE, 47430431887360, 47430431903743,
+STORE, 47430431903744, 47430432038911,
+ERASE, 47430431903744, 47430432038911,
+STORE, 47430431903744, 47430431928319,
+STORE, 47430431928320, 47430432038911,
+STORE, 47430431989760, 47430432038911,
+STORE, 47430431928320, 47430431989759,
+ERASE, 47430431928320, 47430431989759,
+STORE, 47430431928320, 47430431989759,
+STORE, 47430432014336, 47430432038911,
+STORE, 47430431989760, 47430432014335,
+ERASE, 47430431989760, 47430432014335,
+STORE, 47430431989760, 47430432038911,
+ERASE, 47430431989760, 47430432038911,
+STORE, 47430431989760, 47430432014335,
+STORE, 47430432014336, 47430432038911,
+STORE, 47430432022528, 47430432038911,
+STORE, 47430432014336, 47430432022527,
+ERASE, 47430432014336, 47430432022527,
+STORE, 47430432014336, 47430432022527,
+ERASE, 47430432022528, 47430432038911,
+STORE, 47430432022528, 47430432038911,
+STORE, 47430432038912, 47430432059391,
+ERASE, 47430432038912, 47430432059391,
+STORE, 47430432038912, 47430432043007,
+STORE, 47430432043008, 47430432059391,
+STORE, 47430432047104, 47430432059391,
+STORE, 47430432043008, 47430432047103,
+ERASE, 47430432043008, 47430432047103,
+STORE, 47430432043008, 47430432047103,
+STORE, 47430432051200, 47430432059391,
+STORE, 47430432047104, 47430432051199,
+ERASE, 47430432047104, 47430432051199,
+STORE, 47430432047104, 47430432059391,
+ERASE, 47430432047104, 47430432059391,
+STORE, 47430432047104, 47430432051199,
+STORE, 47430432051200, 47430432059391,
+ERASE, 47430432051200, 47430432059391,
+STORE, 47430432051200, 47430432059391,
+STORE, 47430432059392, 47430432067583,
+ERASE, 47430431862784, 47430431887359,
+STORE, 47430431862784, 47430431879167,
+STORE, 47430431879168, 47430431887359,
+ERASE, 47430432051200, 47430432059391,
+STORE, 47430432051200, 47430432055295,
+STORE, 47430432055296, 47430432059391,
+ERASE, 47430432014336, 47430432022527,
+STORE, 47430432014336, 47430432018431,
+STORE, 47430432018432, 47430432022527,
+ };
+ unsigned long set7[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140729808330752, 140737488351231,
+ERASE, 140729808330752, 140737488351231,
+STORE, 140729808330752, 140729808334847,
+STORE, 94629632020480, 94629632192511,
+ERASE, 94629632020480, 94629632192511,
+STORE, 94629632020480, 94629632036863,
+STORE, 94629632036864, 94629632192511,
+ERASE, 94629632036864, 94629632192511,
+STORE, 94629632036864, 94629632139263,
+STORE, 94629632139264, 94629632180223,
+STORE, 94629632180224, 94629632192511,
+STORE, 47439981776896, 47439981948927,
+ERASE, 47439981776896, 47439981948927,
+STORE, 47439981776896, 47439981780991,
+STORE, 47439981780992, 47439981948927,
+ERASE, 47439981780992, 47439981948927,
+STORE, 47439981780992, 47439981903871,
+STORE, 47439981903872, 47439981936639,
+STORE, 47439981936640, 47439981944831,
+STORE, 47439981944832, 47439981948927,
+STORE, 140729808474112, 140729808478207,
+STORE, 140729808461824, 140729808474111,
+STORE, 47439981948928, 47439981957119,
+STORE, 47439981957120, 47439981965311,
+STORE, 47439981965312, 47439982129151,
+ERASE, 47439981965312, 47439982129151,
+STORE, 47439981965312, 47439981977599,
+STORE, 47439981977600, 47439982129151,
+STORE, 47439982075904, 47439982129151,
+STORE, 47439981977600, 47439982075903,
+ERASE, 47439981977600, 47439982075903,
+STORE, 47439981977600, 47439982075903,
+STORE, 47439982120960, 47439982129151,
+STORE, 47439982075904, 47439982120959,
+ERASE, 47439982075904, 47439982120959,
+STORE, 47439982075904, 47439982129151,
+ERASE, 47439982075904, 47439982129151,
+STORE, 47439982075904, 47439982120959,
+STORE, 47439982120960, 47439982129151,
+ERASE, 47439982120960, 47439982129151,
+STORE, 47439982120960, 47439982129151,
+STORE, 47439982129152, 47439985180671,
+STORE, 47439982673920, 47439985180671,
+STORE, 47439982129152, 47439982673919,
+ERASE, 47439982673920, 47439985180671,
+STORE, 47439982673920, 47439984959487,
+STORE, 47439984959488, 47439985180671,
+STORE, 47439984369664, 47439984959487,
+STORE, 47439982673920, 47439984369663,
+ERASE, 47439982673920, 47439984369663,
+STORE, 47439982673920, 47439984369663,
+STORE, 47439984955392, 47439984959487,
+STORE, 47439984369664, 47439984955391,
+ERASE, 47439984369664, 47439984955391,
+STORE, 47439984369664, 47439984955391,
+STORE, 47439985164288, 47439985180671,
+STORE, 47439984959488, 47439985164287,
+ERASE, 47439984959488, 47439985164287,
+STORE, 47439984959488, 47439985164287,
+ERASE, 47439985164288, 47439985180671,
+STORE, 47439985164288, 47439985180671,
+STORE, 47439985180672, 47439987019775,
+STORE, 47439985319936, 47439987019775,
+STORE, 47439985180672, 47439985319935,
+ERASE, 47439985319936, 47439987019775,
+STORE, 47439985319936, 47439986978815,
+STORE, 47439986978816, 47439987019775,
+STORE, 47439986663424, 47439986978815,
+STORE, 47439985319936, 47439986663423,
+ERASE, 47439985319936, 47439986663423,
+STORE, 47439985319936, 47439986663423,
+STORE, 47439986974720, 47439986978815,
+STORE, 47439986663424, 47439986974719,
+ERASE, 47439986663424, 47439986974719,
+STORE, 47439986663424, 47439986974719,
+STORE, 47439987003392, 47439987019775,
+STORE, 47439986978816, 47439987003391,
+ERASE, 47439986978816, 47439987003391,
+STORE, 47439986978816, 47439987003391,
+ERASE, 47439987003392, 47439987019775,
+STORE, 47439987003392, 47439987019775,
+STORE, 47439987019776, 47439987154943,
+ERASE, 47439987019776, 47439987154943,
+STORE, 47439987019776, 47439987044351,
+STORE, 47439987044352, 47439987154943,
+STORE, 47439987105792, 47439987154943,
+STORE, 47439987044352, 47439987105791,
+ERASE, 47439987044352, 47439987105791,
+STORE, 47439987044352, 47439987105791,
+STORE, 47439987130368, 47439987154943,
+STORE, 47439987105792, 47439987130367,
+ERASE, 47439987105792, 47439987130367,
+STORE, 47439987105792, 47439987154943,
+ERASE, 47439987105792, 47439987154943,
+STORE, 47439987105792, 47439987130367,
+STORE, 47439987130368, 47439987154943,
+STORE, 47439987138560, 47439987154943,
+STORE, 47439987130368, 47439987138559,
+ERASE, 47439987130368, 47439987138559,
+STORE, 47439987130368, 47439987138559,
+ERASE, 47439987138560, 47439987154943,
+STORE, 47439987138560, 47439987154943,
+STORE, 47439987154944, 47439987175423,
+ERASE, 47439987154944, 47439987175423,
+STORE, 47439987154944, 47439987159039,
+STORE, 47439987159040, 47439987175423,
+STORE, 47439987163136, 47439987175423,
+STORE, 47439987159040, 47439987163135,
+ERASE, 47439987159040, 47439987163135,
+STORE, 47439987159040, 47439987163135,
+STORE, 47439987167232, 47439987175423,
+STORE, 47439987163136, 47439987167231,
+ERASE, 47439987163136, 47439987167231,
+STORE, 47439987163136, 47439987175423,
+ERASE, 47439987163136, 47439987175423,
+STORE, 47439987163136, 47439987167231,
+STORE, 47439987167232, 47439987175423,
+ERASE, 47439987167232, 47439987175423,
+STORE, 47439987167232, 47439987175423,
+STORE, 47439987175424, 47439987183615,
+ERASE, 47439986978816, 47439987003391,
+STORE, 47439986978816, 47439986995199,
+STORE, 47439986995200, 47439987003391,
+ERASE, 47439987167232, 47439987175423,
+STORE, 47439987167232, 47439987171327,
+STORE, 47439987171328, 47439987175423,
+ERASE, 47439987130368, 47439987138559,
+STORE, 47439987130368, 47439987134463,
+STORE, 47439987134464, 47439987138559,
+ };
+ unsigned long set8[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140722482974720, 140737488351231,
+ERASE, 140722482974720, 140737488351231,
+STORE, 140722482974720, 140722482978815,
+STORE, 94121505034240, 94121505206271,
+ERASE, 94121505034240, 94121505206271,
+STORE, 94121505034240, 94121505050623,
+STORE, 94121505050624, 94121505206271,
+ERASE, 94121505050624, 94121505206271,
+STORE, 94121505050624, 94121505153023,
+STORE, 94121505153024, 94121505193983,
+STORE, 94121505193984, 94121505206271,
+STORE, 47708483284992, 47708483457023,
+ERASE, 47708483284992, 47708483457023,
+STORE, 47708483284992, 47708483289087,
+STORE, 47708483289088, 47708483457023,
+ERASE, 47708483289088, 47708483457023,
+STORE, 47708483289088, 47708483411967,
+STORE, 47708483411968, 47708483444735,
+STORE, 47708483444736, 47708483452927,
+STORE, 47708483452928, 47708483457023,
+STORE, 140722483142656, 140722483146751,
+STORE, 140722483130368, 140722483142655,
+STORE, 47708483457024, 47708483465215,
+STORE, 47708483465216, 47708483473407,
+STORE, 47708483473408, 47708483637247,
+ERASE, 47708483473408, 47708483637247,
+STORE, 47708483473408, 47708483485695,
+STORE, 47708483485696, 47708483637247,
+STORE, 47708483584000, 47708483637247,
+STORE, 47708483485696, 47708483583999,
+ERASE, 47708483485696, 47708483583999,
+STORE, 47708483485696, 47708483583999,
+STORE, 47708483629056, 47708483637247,
+STORE, 47708483584000, 47708483629055,
+ERASE, 47708483584000, 47708483629055,
+STORE, 47708483584000, 47708483637247,
+ERASE, 47708483584000, 47708483637247,
+STORE, 47708483584000, 47708483629055,
+STORE, 47708483629056, 47708483637247,
+ERASE, 47708483629056, 47708483637247,
+STORE, 47708483629056, 47708483637247,
+STORE, 47708483637248, 47708486688767,
+STORE, 47708484182016, 47708486688767,
+STORE, 47708483637248, 47708484182015,
+ERASE, 47708484182016, 47708486688767,
+STORE, 47708484182016, 47708486467583,
+STORE, 47708486467584, 47708486688767,
+STORE, 47708485877760, 47708486467583,
+STORE, 47708484182016, 47708485877759,
+ERASE, 47708484182016, 47708485877759,
+STORE, 47708484182016, 47708485877759,
+STORE, 47708486463488, 47708486467583,
+STORE, 47708485877760, 47708486463487,
+ERASE, 47708485877760, 47708486463487,
+STORE, 47708485877760, 47708486463487,
+STORE, 47708486672384, 47708486688767,
+STORE, 47708486467584, 47708486672383,
+ERASE, 47708486467584, 47708486672383,
+STORE, 47708486467584, 47708486672383,
+ERASE, 47708486672384, 47708486688767,
+STORE, 47708486672384, 47708486688767,
+STORE, 47708486688768, 47708488527871,
+STORE, 47708486828032, 47708488527871,
+STORE, 47708486688768, 47708486828031,
+ERASE, 47708486828032, 47708488527871,
+STORE, 47708486828032, 47708488486911,
+STORE, 47708488486912, 47708488527871,
+STORE, 47708488171520, 47708488486911,
+STORE, 47708486828032, 47708488171519,
+ERASE, 47708486828032, 47708488171519,
+STORE, 47708486828032, 47708488171519,
+STORE, 47708488482816, 47708488486911,
+STORE, 47708488171520, 47708488482815,
+ERASE, 47708488171520, 47708488482815,
+STORE, 47708488171520, 47708488482815,
+STORE, 47708488511488, 47708488527871,
+STORE, 47708488486912, 47708488511487,
+ERASE, 47708488486912, 47708488511487,
+STORE, 47708488486912, 47708488511487,
+ERASE, 47708488511488, 47708488527871,
+STORE, 47708488511488, 47708488527871,
+STORE, 47708488527872, 47708488663039,
+ERASE, 47708488527872, 47708488663039,
+STORE, 47708488527872, 47708488552447,
+STORE, 47708488552448, 47708488663039,
+STORE, 47708488613888, 47708488663039,
+STORE, 47708488552448, 47708488613887,
+ERASE, 47708488552448, 47708488613887,
+STORE, 47708488552448, 47708488613887,
+STORE, 47708488638464, 47708488663039,
+STORE, 47708488613888, 47708488638463,
+ERASE, 47708488613888, 47708488638463,
+STORE, 47708488613888, 47708488663039,
+ERASE, 47708488613888, 47708488663039,
+STORE, 47708488613888, 47708488638463,
+STORE, 47708488638464, 47708488663039,
+STORE, 47708488646656, 47708488663039,
+STORE, 47708488638464, 47708488646655,
+ERASE, 47708488638464, 47708488646655,
+STORE, 47708488638464, 47708488646655,
+ERASE, 47708488646656, 47708488663039,
+STORE, 47708488646656, 47708488663039,
+STORE, 47708488663040, 47708488683519,
+ERASE, 47708488663040, 47708488683519,
+STORE, 47708488663040, 47708488667135,
+STORE, 47708488667136, 47708488683519,
+STORE, 47708488671232, 47708488683519,
+STORE, 47708488667136, 47708488671231,
+ERASE, 47708488667136, 47708488671231,
+STORE, 47708488667136, 47708488671231,
+STORE, 47708488675328, 47708488683519,
+STORE, 47708488671232, 47708488675327,
+ERASE, 47708488671232, 47708488675327,
+STORE, 47708488671232, 47708488683519,
+ERASE, 47708488671232, 47708488683519,
+STORE, 47708488671232, 47708488675327,
+STORE, 47708488675328, 47708488683519,
+ERASE, 47708488675328, 47708488683519,
+STORE, 47708488675328, 47708488683519,
+STORE, 47708488683520, 47708488691711,
+ERASE, 47708488486912, 47708488511487,
+STORE, 47708488486912, 47708488503295,
+STORE, 47708488503296, 47708488511487,
+ERASE, 47708488675328, 47708488683519,
+STORE, 47708488675328, 47708488679423,
+STORE, 47708488679424, 47708488683519,
+ERASE, 47708488638464, 47708488646655,
+STORE, 47708488638464, 47708488642559,
+STORE, 47708488642560, 47708488646655,
+ };
+
+ unsigned long set9[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140736427839488, 140737488351231,
+ERASE, 140736427839488, 140736427839488,
+STORE, 140736427839488, 140736427843583,
+STORE, 94071213395968, 94071213567999,
+ERASE, 94071213395968, 94071213395968,
+STORE, 94071213395968, 94071213412351,
+STORE, 94071213412352, 94071213567999,
+ERASE, 94071213412352, 94071213412352,
+STORE, 94071213412352, 94071213514751,
+STORE, 94071213514752, 94071213555711,
+STORE, 94071213555712, 94071213567999,
+STORE, 139968410644480, 139968410816511,
+ERASE, 139968410644480, 139968410644480,
+STORE, 139968410644480, 139968410648575,
+STORE, 139968410648576, 139968410816511,
+ERASE, 139968410648576, 139968410648576,
+STORE, 139968410648576, 139968410771455,
+STORE, 139968410771456, 139968410804223,
+STORE, 139968410804224, 139968410812415,
+STORE, 139968410812416, 139968410816511,
+STORE, 140736429277184, 140736429281279,
+STORE, 140736429264896, 140736429277183,
+STORE, 47664384352256, 47664384360447,
+STORE, 47664384360448, 47664384368639,
+STORE, 47664384368640, 47664384532479,
+ERASE, 47664384368640, 47664384368640,
+STORE, 47664384368640, 47664384380927,
+STORE, 47664384380928, 47664384532479,
+STORE, 47664384479232, 47664384532479,
+STORE, 47664384380928, 47664384479231,
+ERASE, 47664384380928, 47664384380928,
+STORE, 47664384380928, 47664384479231,
+STORE, 47664384524288, 47664384532479,
+STORE, 47664384479232, 47664384524287,
+ERASE, 47664384479232, 47664384479232,
+STORE, 47664384479232, 47664384532479,
+ERASE, 47664384479232, 47664384479232,
+STORE, 47664384479232, 47664384524287,
+STORE, 47664384524288, 47664384532479,
+ERASE, 47664384524288, 47664384524288,
+STORE, 47664384524288, 47664384532479,
+STORE, 47664384532480, 47664387583999,
+STORE, 47664385077248, 47664387583999,
+STORE, 47664384532480, 47664385077247,
+ERASE, 47664385077248, 47664385077248,
+STORE, 47664385077248, 47664387362815,
+STORE, 47664387362816, 47664387583999,
+STORE, 47664386772992, 47664387362815,
+STORE, 47664385077248, 47664386772991,
+ERASE, 47664385077248, 47664385077248,
+STORE, 47664385077248, 47664386772991,
+STORE, 47664387358720, 47664387362815,
+STORE, 47664386772992, 47664387358719,
+ERASE, 47664386772992, 47664386772992,
+STORE, 47664386772992, 47664387358719,
+STORE, 47664387567616, 47664387583999,
+STORE, 47664387362816, 47664387567615,
+ERASE, 47664387362816, 47664387362816,
+STORE, 47664387362816, 47664387567615,
+ERASE, 47664387567616, 47664387567616,
+STORE, 47664387567616, 47664387583999,
+STORE, 47664387584000, 47664389423103,
+STORE, 47664387723264, 47664389423103,
+STORE, 47664387584000, 47664387723263,
+ERASE, 47664387723264, 47664387723264,
+STORE, 47664387723264, 47664389382143,
+STORE, 47664389382144, 47664389423103,
+STORE, 47664389066752, 47664389382143,
+STORE, 47664387723264, 47664389066751,
+ERASE, 47664387723264, 47664387723264,
+STORE, 47664387723264, 47664389066751,
+STORE, 47664389378048, 47664389382143,
+STORE, 47664389066752, 47664389378047,
+ERASE, 47664389066752, 47664389066752,
+STORE, 47664389066752, 47664389378047,
+STORE, 47664389406720, 47664389423103,
+STORE, 47664389382144, 47664389406719,
+ERASE, 47664389382144, 47664389382144,
+STORE, 47664389382144, 47664389406719,
+ERASE, 47664389406720, 47664389406720,
+STORE, 47664389406720, 47664389423103,
+STORE, 47664389423104, 47664389558271,
+ERASE, 47664389423104, 47664389423104,
+STORE, 47664389423104, 47664389447679,
+STORE, 47664389447680, 47664389558271,
+STORE, 47664389509120, 47664389558271,
+STORE, 47664389447680, 47664389509119,
+ERASE, 47664389447680, 47664389447680,
+STORE, 47664389447680, 47664389509119,
+STORE, 47664389533696, 47664389558271,
+STORE, 47664389509120, 47664389533695,
+ERASE, 47664389509120, 47664389509120,
+STORE, 47664389509120, 47664389558271,
+ERASE, 47664389509120, 47664389509120,
+STORE, 47664389509120, 47664389533695,
+STORE, 47664389533696, 47664389558271,
+STORE, 47664389541888, 47664389558271,
+STORE, 47664389533696, 47664389541887,
+ERASE, 47664389533696, 47664389533696,
+STORE, 47664389533696, 47664389541887,
+ERASE, 47664389541888, 47664389541888,
+STORE, 47664389541888, 47664389558271,
+STORE, 47664389558272, 47664389578751,
+ERASE, 47664389558272, 47664389558272,
+STORE, 47664389558272, 47664389562367,
+STORE, 47664389562368, 47664389578751,
+STORE, 47664389566464, 47664389578751,
+STORE, 47664389562368, 47664389566463,
+ERASE, 47664389562368, 47664389562368,
+STORE, 47664389562368, 47664389566463,
+STORE, 47664389570560, 47664389578751,
+STORE, 47664389566464, 47664389570559,
+ERASE, 47664389566464, 47664389566464,
+STORE, 47664389566464, 47664389578751,
+ERASE, 47664389566464, 47664389566464,
+STORE, 47664389566464, 47664389570559,
+STORE, 47664389570560, 47664389578751,
+ERASE, 47664389570560, 47664389570560,
+STORE, 47664389570560, 47664389578751,
+STORE, 47664389578752, 47664389586943,
+ERASE, 47664389382144, 47664389382144,
+STORE, 47664389382144, 47664389398527,
+STORE, 47664389398528, 47664389406719,
+ERASE, 47664389570560, 47664389570560,
+STORE, 47664389570560, 47664389574655,
+STORE, 47664389574656, 47664389578751,
+ERASE, 47664389533696, 47664389533696,
+STORE, 47664389533696, 47664389537791,
+STORE, 47664389537792, 47664389541887,
+ERASE, 47664387362816, 47664387362816,
+STORE, 47664387362816, 47664387559423,
+STORE, 47664387559424, 47664387567615,
+ERASE, 47664384524288, 47664384524288,
+STORE, 47664384524288, 47664384528383,
+STORE, 47664384528384, 47664384532479,
+ERASE, 94071213555712, 94071213555712,
+STORE, 94071213555712, 94071213563903,
+STORE, 94071213563904, 94071213567999,
+ERASE, 139968410804224, 139968410804224,
+STORE, 139968410804224, 139968410808319,
+STORE, 139968410808320, 139968410812415,
+ERASE, 47664384352256, 47664384352256,
+STORE, 94071244402688, 94071244537855,
+STORE, 140737488347136, 140737488351231,
+STORE, 140728271503360, 140737488351231,
+ERASE, 140728271503360, 140728271503360,
+STORE, 140728271503360, 140728271507455,
+STORE, 94410361982976, 94410362155007,
+ERASE, 94410361982976, 94410361982976,
+STORE, 94410361982976, 94410361999359,
+STORE, 94410361999360, 94410362155007,
+ERASE, 94410361999360, 94410361999360,
+STORE, 94410361999360, 94410362101759,
+STORE, 94410362101760, 94410362142719,
+STORE, 94410362142720, 94410362155007,
+STORE, 140351953997824, 140351954169855,
+ERASE, 140351953997824, 140351953997824,
+STORE, 140351953997824, 140351954001919,
+STORE, 140351954001920, 140351954169855,
+ERASE, 140351954001920, 140351954001920,
+STORE, 140351954001920, 140351954124799,
+STORE, 140351954124800, 140351954157567,
+STORE, 140351954157568, 140351954165759,
+STORE, 140351954165760, 140351954169855,
+STORE, 140728272429056, 140728272433151,
+STORE, 140728272416768, 140728272429055,
+STORE, 47280840998912, 47280841007103,
+STORE, 47280841007104, 47280841015295,
+STORE, 47280841015296, 47280841179135,
+ERASE, 47280841015296, 47280841015296,
+STORE, 47280841015296, 47280841027583,
+STORE, 47280841027584, 47280841179135,
+STORE, 47280841125888, 47280841179135,
+STORE, 47280841027584, 47280841125887,
+ERASE, 47280841027584, 47280841027584,
+STORE, 47280841027584, 47280841125887,
+STORE, 47280841170944, 47280841179135,
+STORE, 47280841125888, 47280841170943,
+ERASE, 47280841125888, 47280841125888,
+STORE, 47280841125888, 47280841179135,
+ERASE, 47280841125888, 47280841125888,
+STORE, 47280841125888, 47280841170943,
+STORE, 47280841170944, 47280841179135,
+ERASE, 47280841170944, 47280841170944,
+STORE, 47280841170944, 47280841179135,
+STORE, 47280841179136, 47280844230655,
+STORE, 47280841723904, 47280844230655,
+STORE, 47280841179136, 47280841723903,
+ERASE, 47280841723904, 47280841723904,
+STORE, 47280841723904, 47280844009471,
+STORE, 47280844009472, 47280844230655,
+STORE, 47280843419648, 47280844009471,
+STORE, 47280841723904, 47280843419647,
+ERASE, 47280841723904, 47280841723904,
+STORE, 47280841723904, 47280843419647,
+STORE, 47280844005376, 47280844009471,
+STORE, 47280843419648, 47280844005375,
+ERASE, 47280843419648, 47280843419648,
+STORE, 47280843419648, 47280844005375,
+STORE, 47280844214272, 47280844230655,
+STORE, 47280844009472, 47280844214271,
+ERASE, 47280844009472, 47280844009472,
+STORE, 47280844009472, 47280844214271,
+ERASE, 47280844214272, 47280844214272,
+STORE, 47280844214272, 47280844230655,
+STORE, 47280844230656, 47280846069759,
+STORE, 47280844369920, 47280846069759,
+STORE, 47280844230656, 47280844369919,
+ERASE, 47280844369920, 47280844369920,
+STORE, 47280844369920, 47280846028799,
+STORE, 47280846028800, 47280846069759,
+STORE, 47280845713408, 47280846028799,
+STORE, 47280844369920, 47280845713407,
+ERASE, 47280844369920, 47280844369920,
+STORE, 47280844369920, 47280845713407,
+STORE, 47280846024704, 47280846028799,
+STORE, 47280845713408, 47280846024703,
+ERASE, 47280845713408, 47280845713408,
+STORE, 47280845713408, 47280846024703,
+STORE, 47280846053376, 47280846069759,
+STORE, 47280846028800, 47280846053375,
+ERASE, 47280846028800, 47280846028800,
+STORE, 47280846028800, 47280846053375,
+ERASE, 47280846053376, 47280846053376,
+STORE, 47280846053376, 47280846069759,
+STORE, 47280846069760, 47280846204927,
+ERASE, 47280846069760, 47280846069760,
+STORE, 47280846069760, 47280846094335,
+STORE, 47280846094336, 47280846204927,
+STORE, 47280846155776, 47280846204927,
+STORE, 47280846094336, 47280846155775,
+ERASE, 47280846094336, 47280846094336,
+STORE, 47280846094336, 47280846155775,
+STORE, 47280846180352, 47280846204927,
+STORE, 47280846155776, 47280846180351,
+ERASE, 47280846155776, 47280846155776,
+STORE, 47280846155776, 47280846204927,
+ERASE, 47280846155776, 47280846155776,
+STORE, 47280846155776, 47280846180351,
+STORE, 47280846180352, 47280846204927,
+STORE, 47280846188544, 47280846204927,
+STORE, 47280846180352, 47280846188543,
+ERASE, 47280846180352, 47280846180352,
+STORE, 47280846180352, 47280846188543,
+ERASE, 47280846188544, 47280846188544,
+STORE, 47280846188544, 47280846204927,
+STORE, 47280846204928, 47280846225407,
+ERASE, 47280846204928, 47280846204928,
+STORE, 47280846204928, 47280846209023,
+STORE, 47280846209024, 47280846225407,
+STORE, 47280846213120, 47280846225407,
+STORE, 47280846209024, 47280846213119,
+ERASE, 47280846209024, 47280846209024,
+STORE, 47280846209024, 47280846213119,
+STORE, 47280846217216, 47280846225407,
+STORE, 47280846213120, 47280846217215,
+ERASE, 47280846213120, 47280846213120,
+STORE, 47280846213120, 47280846225407,
+ERASE, 47280846213120, 47280846213120,
+STORE, 47280846213120, 47280846217215,
+STORE, 47280846217216, 47280846225407,
+ERASE, 47280846217216, 47280846217216,
+STORE, 47280846217216, 47280846225407,
+STORE, 47280846225408, 47280846233599,
+ERASE, 47280846028800, 47280846028800,
+STORE, 47280846028800, 47280846045183,
+STORE, 47280846045184, 47280846053375,
+ERASE, 47280846217216, 47280846217216,
+STORE, 47280846217216, 47280846221311,
+STORE, 47280846221312, 47280846225407,
+ERASE, 47280846180352, 47280846180352,
+STORE, 47280846180352, 47280846184447,
+STORE, 47280846184448, 47280846188543,
+ERASE, 47280844009472, 47280844009472,
+STORE, 47280844009472, 47280844206079,
+STORE, 47280844206080, 47280844214271,
+ERASE, 47280841170944, 47280841170944,
+STORE, 47280841170944, 47280841175039,
+STORE, 47280841175040, 47280841179135,
+ERASE, 94410362142720, 94410362142720,
+STORE, 94410362142720, 94410362150911,
+STORE, 94410362150912, 94410362155007,
+ERASE, 140351954157568, 140351954157568,
+STORE, 140351954157568, 140351954161663,
+STORE, 140351954161664, 140351954165759,
+ERASE, 47280840998912, 47280840998912,
+STORE, 94410379456512, 94410379591679,
+STORE, 140737488347136, 140737488351231,
+STORE, 140732946362368, 140737488351231,
+ERASE, 140732946362368, 140732946362368,
+STORE, 140732946362368, 140732946366463,
+STORE, 94352937934848, 94352938106879,
+ERASE, 94352937934848, 94352937934848,
+STORE, 94352937934848, 94352937951231,
+STORE, 94352937951232, 94352938106879,
+ERASE, 94352937951232, 94352937951232,
+STORE, 94352937951232, 94352938053631,
+STORE, 94352938053632, 94352938094591,
+STORE, 94352938094592, 94352938106879,
+STORE, 140595518742528, 140595518914559,
+ERASE, 140595518742528, 140595518742528,
+STORE, 140595518742528, 140595518746623,
+STORE, 140595518746624, 140595518914559,
+ERASE, 140595518746624, 140595518746624,
+STORE, 140595518746624, 140595518869503,
+STORE, 140595518869504, 140595518902271,
+STORE, 140595518902272, 140595518910463,
+STORE, 140595518910464, 140595518914559,
+STORE, 140732947468288, 140732947472383,
+STORE, 140732947456000, 140732947468287,
+STORE, 47037276254208, 47037276262399,
+STORE, 47037276262400, 47037276270591,
+STORE, 47037276270592, 47037276434431,
+ERASE, 47037276270592, 47037276270592,
+STORE, 47037276270592, 47037276282879,
+STORE, 47037276282880, 47037276434431,
+STORE, 47037276381184, 47037276434431,
+STORE, 47037276282880, 47037276381183,
+ERASE, 47037276282880, 47037276282880,
+STORE, 47037276282880, 47037276381183,
+STORE, 47037276426240, 47037276434431,
+STORE, 47037276381184, 47037276426239,
+ERASE, 47037276381184, 47037276381184,
+STORE, 47037276381184, 47037276434431,
+ERASE, 47037276381184, 47037276381184,
+STORE, 47037276381184, 47037276426239,
+STORE, 47037276426240, 47037276434431,
+ERASE, 47037276426240, 47037276426240,
+STORE, 47037276426240, 47037276434431,
+STORE, 47037276434432, 47037279485951,
+STORE, 47037276979200, 47037279485951,
+STORE, 47037276434432, 47037276979199,
+ERASE, 47037276979200, 47037276979200,
+STORE, 47037276979200, 47037279264767,
+STORE, 47037279264768, 47037279485951,
+STORE, 47037278674944, 47037279264767,
+STORE, 47037276979200, 47037278674943,
+ERASE, 47037276979200, 47037276979200,
+STORE, 47037276979200, 47037278674943,
+STORE, 47037279260672, 47037279264767,
+STORE, 47037278674944, 47037279260671,
+ERASE, 47037278674944, 47037278674944,
+STORE, 47037278674944, 47037279260671,
+STORE, 47037279469568, 47037279485951,
+STORE, 47037279264768, 47037279469567,
+ERASE, 47037279264768, 47037279264768,
+STORE, 47037279264768, 47037279469567,
+ERASE, 47037279469568, 47037279469568,
+STORE, 47037279469568, 47037279485951,
+STORE, 47037279485952, 47037281325055,
+STORE, 47037279625216, 47037281325055,
+STORE, 47037279485952, 47037279625215,
+ERASE, 47037279625216, 47037279625216,
+STORE, 47037279625216, 47037281284095,
+STORE, 47037281284096, 47037281325055,
+STORE, 47037280968704, 47037281284095,
+STORE, 47037279625216, 47037280968703,
+ERASE, 47037279625216, 47037279625216,
+STORE, 47037279625216, 47037280968703,
+STORE, 47037281280000, 47037281284095,
+STORE, 47037280968704, 47037281279999,
+ERASE, 47037280968704, 47037280968704,
+STORE, 47037280968704, 47037281279999,
+STORE, 47037281308672, 47037281325055,
+STORE, 47037281284096, 47037281308671,
+ERASE, 47037281284096, 47037281284096,
+STORE, 47037281284096, 47037281308671,
+ERASE, 47037281308672, 47037281308672,
+STORE, 47037281308672, 47037281325055,
+STORE, 47037281325056, 47037281460223,
+ERASE, 47037281325056, 47037281325056,
+STORE, 47037281325056, 47037281349631,
+STORE, 47037281349632, 47037281460223,
+STORE, 47037281411072, 47037281460223,
+STORE, 47037281349632, 47037281411071,
+ERASE, 47037281349632, 47037281349632,
+STORE, 47037281349632, 47037281411071,
+STORE, 47037281435648, 47037281460223,
+STORE, 47037281411072, 47037281435647,
+ERASE, 47037281411072, 47037281411072,
+STORE, 47037281411072, 47037281460223,
+ERASE, 47037281411072, 47037281411072,
+STORE, 47037281411072, 47037281435647,
+STORE, 47037281435648, 47037281460223,
+STORE, 47037281443840, 47037281460223,
+STORE, 47037281435648, 47037281443839,
+ERASE, 47037281435648, 47037281435648,
+STORE, 47037281435648, 47037281443839,
+ERASE, 47037281443840, 47037281443840,
+STORE, 47037281443840, 47037281460223,
+STORE, 47037281460224, 47037281480703,
+ERASE, 47037281460224, 47037281460224,
+STORE, 47037281460224, 47037281464319,
+STORE, 47037281464320, 47037281480703,
+STORE, 47037281468416, 47037281480703,
+STORE, 47037281464320, 47037281468415,
+ERASE, 47037281464320, 47037281464320,
+STORE, 47037281464320, 47037281468415,
+STORE, 47037281472512, 47037281480703,
+STORE, 47037281468416, 47037281472511,
+ERASE, 47037281468416, 47037281468416,
+STORE, 47037281468416, 47037281480703,
+ERASE, 47037281468416, 47037281468416,
+STORE, 47037281468416, 47037281472511,
+STORE, 47037281472512, 47037281480703,
+ERASE, 47037281472512, 47037281472512,
+STORE, 47037281472512, 47037281480703,
+STORE, 47037281480704, 47037281488895,
+ERASE, 47037281284096, 47037281284096,
+STORE, 47037281284096, 47037281300479,
+STORE, 47037281300480, 47037281308671,
+ERASE, 47037281472512, 47037281472512,
+STORE, 47037281472512, 47037281476607,
+STORE, 47037281476608, 47037281480703,
+ERASE, 47037281435648, 47037281435648,
+STORE, 47037281435648, 47037281439743,
+STORE, 47037281439744, 47037281443839,
+ERASE, 47037279264768, 47037279264768,
+STORE, 47037279264768, 47037279461375,
+STORE, 47037279461376, 47037279469567,
+ERASE, 47037276426240, 47037276426240,
+STORE, 47037276426240, 47037276430335,
+STORE, 47037276430336, 47037276434431,
+ERASE, 94352938094592, 94352938094592,
+STORE, 94352938094592, 94352938102783,
+STORE, 94352938102784, 94352938106879,
+ERASE, 140595518902272, 140595518902272,
+STORE, 140595518902272, 140595518906367,
+STORE, 140595518906368, 140595518910463,
+ERASE, 47037276254208, 47037276254208,
+STORE, 94352938438656, 94352938573823,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733506027520, 140737488351231,
+ERASE, 140733506027520, 140733506027520,
+STORE, 140733506027520, 140733506031615,
+STORE, 94150123073536, 94150123245567,
+ERASE, 94150123073536, 94150123073536,
+STORE, 94150123073536, 94150123089919,
+STORE, 94150123089920, 94150123245567,
+ERASE, 94150123089920, 94150123089920,
+STORE, 94150123089920, 94150123192319,
+STORE, 94150123192320, 94150123233279,
+STORE, 94150123233280, 94150123245567,
+STORE, 140081290375168, 140081290547199,
+ERASE, 140081290375168, 140081290375168,
+STORE, 140081290375168, 140081290379263,
+STORE, 140081290379264, 140081290547199,
+ERASE, 140081290379264, 140081290379264,
+STORE, 140081290379264, 140081290502143,
+STORE, 140081290502144, 140081290534911,
+STORE, 140081290534912, 140081290543103,
+STORE, 140081290543104, 140081290547199,
+STORE, 140733506707456, 140733506711551,
+STORE, 140733506695168, 140733506707455,
+STORE, 47551504621568, 47551504629759,
+STORE, 47551504629760, 47551504637951,
+STORE, 47551504637952, 47551504801791,
+ERASE, 47551504637952, 47551504637952,
+STORE, 47551504637952, 47551504650239,
+STORE, 47551504650240, 47551504801791,
+STORE, 47551504748544, 47551504801791,
+STORE, 47551504650240, 47551504748543,
+ERASE, 47551504650240, 47551504650240,
+STORE, 47551504650240, 47551504748543,
+STORE, 47551504793600, 47551504801791,
+STORE, 47551504748544, 47551504793599,
+ERASE, 47551504748544, 47551504748544,
+STORE, 47551504748544, 47551504801791,
+ERASE, 47551504748544, 47551504748544,
+STORE, 47551504748544, 47551504793599,
+STORE, 47551504793600, 47551504801791,
+ERASE, 47551504793600, 47551504793600,
+STORE, 47551504793600, 47551504801791,
+STORE, 47551504801792, 47551507853311,
+STORE, 47551505346560, 47551507853311,
+STORE, 47551504801792, 47551505346559,
+ERASE, 47551505346560, 47551505346560,
+STORE, 47551505346560, 47551507632127,
+STORE, 47551507632128, 47551507853311,
+STORE, 47551507042304, 47551507632127,
+STORE, 47551505346560, 47551507042303,
+ERASE, 47551505346560, 47551505346560,
+STORE, 47551505346560, 47551507042303,
+STORE, 47551507628032, 47551507632127,
+STORE, 47551507042304, 47551507628031,
+ERASE, 47551507042304, 47551507042304,
+STORE, 47551507042304, 47551507628031,
+STORE, 47551507836928, 47551507853311,
+STORE, 47551507632128, 47551507836927,
+ERASE, 47551507632128, 47551507632128,
+STORE, 47551507632128, 47551507836927,
+ERASE, 47551507836928, 47551507836928,
+STORE, 47551507836928, 47551507853311,
+STORE, 47551507853312, 47551509692415,
+STORE, 47551507992576, 47551509692415,
+STORE, 47551507853312, 47551507992575,
+ERASE, 47551507992576, 47551507992576,
+STORE, 47551507992576, 47551509651455,
+STORE, 47551509651456, 47551509692415,
+STORE, 47551509336064, 47551509651455,
+STORE, 47551507992576, 47551509336063,
+ERASE, 47551507992576, 47551507992576,
+STORE, 47551507992576, 47551509336063,
+STORE, 47551509647360, 47551509651455,
+STORE, 47551509336064, 47551509647359,
+ERASE, 47551509336064, 47551509336064,
+STORE, 47551509336064, 47551509647359,
+STORE, 47551509676032, 47551509692415,
+STORE, 47551509651456, 47551509676031,
+ERASE, 47551509651456, 47551509651456,
+STORE, 47551509651456, 47551509676031,
+ERASE, 47551509676032, 47551509676032,
+STORE, 47551509676032, 47551509692415,
+STORE, 47551509692416, 47551509827583,
+ERASE, 47551509692416, 47551509692416,
+STORE, 47551509692416, 47551509716991,
+STORE, 47551509716992, 47551509827583,
+STORE, 47551509778432, 47551509827583,
+STORE, 47551509716992, 47551509778431,
+ERASE, 47551509716992, 47551509716992,
+STORE, 47551509716992, 47551509778431,
+STORE, 47551509803008, 47551509827583,
+STORE, 47551509778432, 47551509803007,
+ERASE, 47551509778432, 47551509778432,
+STORE, 47551509778432, 47551509827583,
+ERASE, 47551509778432, 47551509778432,
+STORE, 47551509778432, 47551509803007,
+STORE, 47551509803008, 47551509827583,
+STORE, 47551509811200, 47551509827583,
+STORE, 47551509803008, 47551509811199,
+ERASE, 47551509803008, 47551509803008,
+STORE, 47551509803008, 47551509811199,
+ERASE, 47551509811200, 47551509811200,
+STORE, 47551509811200, 47551509827583,
+STORE, 47551509827584, 47551509848063,
+ERASE, 47551509827584, 47551509827584,
+STORE, 47551509827584, 47551509831679,
+STORE, 47551509831680, 47551509848063,
+STORE, 47551509835776, 47551509848063,
+STORE, 47551509831680, 47551509835775,
+ERASE, 47551509831680, 47551509831680,
+STORE, 47551509831680, 47551509835775,
+STORE, 47551509839872, 47551509848063,
+STORE, 47551509835776, 47551509839871,
+ERASE, 47551509835776, 47551509835776,
+STORE, 47551509835776, 47551509848063,
+ERASE, 47551509835776, 47551509835776,
+STORE, 47551509835776, 47551509839871,
+STORE, 47551509839872, 47551509848063,
+ERASE, 47551509839872, 47551509839872,
+STORE, 47551509839872, 47551509848063,
+STORE, 47551509848064, 47551509856255,
+ERASE, 47551509651456, 47551509651456,
+STORE, 47551509651456, 47551509667839,
+STORE, 47551509667840, 47551509676031,
+ERASE, 47551509839872, 47551509839872,
+STORE, 47551509839872, 47551509843967,
+STORE, 47551509843968, 47551509848063,
+ERASE, 47551509803008, 47551509803008,
+STORE, 47551509803008, 47551509807103,
+STORE, 47551509807104, 47551509811199,
+ERASE, 47551507632128, 47551507632128,
+STORE, 47551507632128, 47551507828735,
+STORE, 47551507828736, 47551507836927,
+ERASE, 47551504793600, 47551504793600,
+STORE, 47551504793600, 47551504797695,
+STORE, 47551504797696, 47551504801791,
+ERASE, 94150123233280, 94150123233280,
+STORE, 94150123233280, 94150123241471,
+STORE, 94150123241472, 94150123245567,
+ERASE, 140081290534912, 140081290534912,
+STORE, 140081290534912, 140081290539007,
+STORE, 140081290539008, 140081290543103,
+ERASE, 47551504621568, 47551504621568,
+STORE, 94150148112384, 94150148247551,
+STORE, 140737488347136, 140737488351231,
+STORE, 140734389334016, 140737488351231,
+ERASE, 140734389334016, 140734389334016,
+STORE, 140734389334016, 140734389338111,
+STORE, 94844636606464, 94844636778495,
+ERASE, 94844636606464, 94844636606464,
+STORE, 94844636606464, 94844636622847,
+STORE, 94844636622848, 94844636778495,
+ERASE, 94844636622848, 94844636622848,
+STORE, 94844636622848, 94844636725247,
+STORE, 94844636725248, 94844636766207,
+STORE, 94844636766208, 94844636778495,
+STORE, 139922765217792, 139922765389823,
+ERASE, 139922765217792, 139922765217792,
+STORE, 139922765217792, 139922765221887,
+STORE, 139922765221888, 139922765389823,
+ERASE, 139922765221888, 139922765221888,
+STORE, 139922765221888, 139922765344767,
+STORE, 139922765344768, 139922765377535,
+STORE, 139922765377536, 139922765385727,
+STORE, 139922765385728, 139922765389823,
+STORE, 140734389678080, 140734389682175,
+STORE, 140734389665792, 140734389678079,
+STORE, 47710029778944, 47710029787135,
+STORE, 47710029787136, 47710029795327,
+STORE, 47710029795328, 47710029959167,
+ERASE, 47710029795328, 47710029795328,
+STORE, 47710029795328, 47710029807615,
+STORE, 47710029807616, 47710029959167,
+STORE, 47710029905920, 47710029959167,
+STORE, 47710029807616, 47710029905919,
+ERASE, 47710029807616, 47710029807616,
+STORE, 47710029807616, 47710029905919,
+STORE, 47710029950976, 47710029959167,
+STORE, 47710029905920, 47710029950975,
+ERASE, 47710029905920, 47710029905920,
+STORE, 47710029905920, 47710029959167,
+ERASE, 47710029905920, 47710029905920,
+STORE, 47710029905920, 47710029950975,
+STORE, 47710029950976, 47710029959167,
+ERASE, 47710029950976, 47710029950976,
+STORE, 47710029950976, 47710029959167,
+STORE, 47710029959168, 47710033010687,
+STORE, 47710030503936, 47710033010687,
+STORE, 47710029959168, 47710030503935,
+ERASE, 47710030503936, 47710030503936,
+STORE, 47710030503936, 47710032789503,
+STORE, 47710032789504, 47710033010687,
+STORE, 47710032199680, 47710032789503,
+STORE, 47710030503936, 47710032199679,
+ERASE, 47710030503936, 47710030503936,
+STORE, 47710030503936, 47710032199679,
+STORE, 47710032785408, 47710032789503,
+STORE, 47710032199680, 47710032785407,
+ERASE, 47710032199680, 47710032199680,
+STORE, 47710032199680, 47710032785407,
+STORE, 47710032994304, 47710033010687,
+STORE, 47710032789504, 47710032994303,
+ERASE, 47710032789504, 47710032789504,
+STORE, 47710032789504, 47710032994303,
+ERASE, 47710032994304, 47710032994304,
+STORE, 47710032994304, 47710033010687,
+STORE, 47710033010688, 47710034849791,
+STORE, 47710033149952, 47710034849791,
+STORE, 47710033010688, 47710033149951,
+ERASE, 47710033149952, 47710033149952,
+STORE, 47710033149952, 47710034808831,
+STORE, 47710034808832, 47710034849791,
+STORE, 47710034493440, 47710034808831,
+STORE, 47710033149952, 47710034493439,
+ERASE, 47710033149952, 47710033149952,
+STORE, 47710033149952, 47710034493439,
+STORE, 47710034804736, 47710034808831,
+STORE, 47710034493440, 47710034804735,
+ERASE, 47710034493440, 47710034493440,
+STORE, 47710034493440, 47710034804735,
+STORE, 47710034833408, 47710034849791,
+STORE, 47710034808832, 47710034833407,
+ERASE, 47710034808832, 47710034808832,
+STORE, 47710034808832, 47710034833407,
+ERASE, 47710034833408, 47710034833408,
+STORE, 47710034833408, 47710034849791,
+STORE, 47710034849792, 47710034984959,
+ERASE, 47710034849792, 47710034849792,
+STORE, 47710034849792, 47710034874367,
+STORE, 47710034874368, 47710034984959,
+STORE, 47710034935808, 47710034984959,
+STORE, 47710034874368, 47710034935807,
+ERASE, 47710034874368, 47710034874368,
+STORE, 47710034874368, 47710034935807,
+STORE, 47710034960384, 47710034984959,
+STORE, 47710034935808, 47710034960383,
+ERASE, 47710034935808, 47710034935808,
+STORE, 47710034935808, 47710034984959,
+ERASE, 47710034935808, 47710034935808,
+STORE, 47710034935808, 47710034960383,
+STORE, 47710034960384, 47710034984959,
+STORE, 47710034968576, 47710034984959,
+STORE, 47710034960384, 47710034968575,
+ERASE, 47710034960384, 47710034960384,
+STORE, 47710034960384, 47710034968575,
+ERASE, 47710034968576, 47710034968576,
+STORE, 47710034968576, 47710034984959,
+STORE, 47710034984960, 47710035005439,
+ERASE, 47710034984960, 47710034984960,
+STORE, 47710034984960, 47710034989055,
+STORE, 47710034989056, 47710035005439,
+STORE, 47710034993152, 47710035005439,
+STORE, 47710034989056, 47710034993151,
+ERASE, 47710034989056, 47710034989056,
+STORE, 47710034989056, 47710034993151,
+STORE, 47710034997248, 47710035005439,
+STORE, 47710034993152, 47710034997247,
+ERASE, 47710034993152, 47710034993152,
+STORE, 47710034993152, 47710035005439,
+ERASE, 47710034993152, 47710034993152,
+STORE, 47710034993152, 47710034997247,
+STORE, 47710034997248, 47710035005439,
+ERASE, 47710034997248, 47710034997248,
+STORE, 47710034997248, 47710035005439,
+STORE, 47710035005440, 47710035013631,
+ERASE, 47710034808832, 47710034808832,
+STORE, 47710034808832, 47710034825215,
+STORE, 47710034825216, 47710034833407,
+ERASE, 47710034997248, 47710034997248,
+STORE, 47710034997248, 47710035001343,
+STORE, 47710035001344, 47710035005439,
+ERASE, 47710034960384, 47710034960384,
+STORE, 47710034960384, 47710034964479,
+STORE, 47710034964480, 47710034968575,
+ERASE, 47710032789504, 47710032789504,
+STORE, 47710032789504, 47710032986111,
+STORE, 47710032986112, 47710032994303,
+ERASE, 47710029950976, 47710029950976,
+STORE, 47710029950976, 47710029955071,
+STORE, 47710029955072, 47710029959167,
+ERASE, 94844636766208, 94844636766208,
+STORE, 94844636766208, 94844636774399,
+STORE, 94844636774400, 94844636778495,
+ERASE, 139922765377536, 139922765377536,
+STORE, 139922765377536, 139922765381631,
+STORE, 139922765381632, 139922765385727,
+ERASE, 47710029778944, 47710029778944,
+STORE, 94844641775616, 94844641910783,
+STORE, 140737488347136, 140737488351231,
+STORE, 140732213886976, 140737488351231,
+ERASE, 140732213886976, 140732213886976,
+STORE, 140732213886976, 140732213891071,
+STORE, 94240508887040, 94240509059071,
+ERASE, 94240508887040, 94240508887040,
+STORE, 94240508887040, 94240508903423,
+STORE, 94240508903424, 94240509059071,
+ERASE, 94240508903424, 94240508903424,
+STORE, 94240508903424, 94240509005823,
+STORE, 94240509005824, 94240509046783,
+STORE, 94240509046784, 94240509059071,
+STORE, 140275106516992, 140275106689023,
+ERASE, 140275106516992, 140275106516992,
+STORE, 140275106516992, 140275106521087,
+STORE, 140275106521088, 140275106689023,
+ERASE, 140275106521088, 140275106521088,
+STORE, 140275106521088, 140275106643967,
+STORE, 140275106643968, 140275106676735,
+STORE, 140275106676736, 140275106684927,
+STORE, 140275106684928, 140275106689023,
+STORE, 140732213977088, 140732213981183,
+STORE, 140732213964800, 140732213977087,
+STORE, 47357688479744, 47357688487935,
+STORE, 47357688487936, 47357688496127,
+STORE, 47357688496128, 47357688659967,
+ERASE, 47357688496128, 47357688496128,
+STORE, 47357688496128, 47357688508415,
+STORE, 47357688508416, 47357688659967,
+STORE, 47357688606720, 47357688659967,
+STORE, 47357688508416, 47357688606719,
+ERASE, 47357688508416, 47357688508416,
+STORE, 47357688508416, 47357688606719,
+STORE, 47357688651776, 47357688659967,
+STORE, 47357688606720, 47357688651775,
+ERASE, 47357688606720, 47357688606720,
+STORE, 47357688606720, 47357688659967,
+ERASE, 47357688606720, 47357688606720,
+STORE, 47357688606720, 47357688651775,
+STORE, 47357688651776, 47357688659967,
+ERASE, 47357688651776, 47357688651776,
+STORE, 47357688651776, 47357688659967,
+STORE, 47357688659968, 47357691711487,
+STORE, 47357689204736, 47357691711487,
+STORE, 47357688659968, 47357689204735,
+ERASE, 47357689204736, 47357689204736,
+STORE, 47357689204736, 47357691490303,
+STORE, 47357691490304, 47357691711487,
+STORE, 47357690900480, 47357691490303,
+STORE, 47357689204736, 47357690900479,
+ERASE, 47357689204736, 47357689204736,
+STORE, 47357689204736, 47357690900479,
+STORE, 47357691486208, 47357691490303,
+STORE, 47357690900480, 47357691486207,
+ERASE, 47357690900480, 47357690900480,
+STORE, 47357690900480, 47357691486207,
+STORE, 47357691695104, 47357691711487,
+STORE, 47357691490304, 47357691695103,
+ERASE, 47357691490304, 47357691490304,
+STORE, 47357691490304, 47357691695103,
+ERASE, 47357691695104, 47357691695104,
+STORE, 47357691695104, 47357691711487,
+STORE, 47357691711488, 47357693550591,
+STORE, 47357691850752, 47357693550591,
+STORE, 47357691711488, 47357691850751,
+ERASE, 47357691850752, 47357691850752,
+STORE, 47357691850752, 47357693509631,
+STORE, 47357693509632, 47357693550591,
+STORE, 47357693194240, 47357693509631,
+STORE, 47357691850752, 47357693194239,
+ERASE, 47357691850752, 47357691850752,
+STORE, 47357691850752, 47357693194239,
+STORE, 47357693505536, 47357693509631,
+STORE, 47357693194240, 47357693505535,
+ERASE, 47357693194240, 47357693194240,
+STORE, 47357693194240, 47357693505535,
+STORE, 47357693534208, 47357693550591,
+STORE, 47357693509632, 47357693534207,
+ERASE, 47357693509632, 47357693509632,
+STORE, 47357693509632, 47357693534207,
+ERASE, 47357693534208, 47357693534208,
+STORE, 47357693534208, 47357693550591,
+STORE, 47357693550592, 47357693685759,
+ERASE, 47357693550592, 47357693550592,
+STORE, 47357693550592, 47357693575167,
+STORE, 47357693575168, 47357693685759,
+STORE, 47357693636608, 47357693685759,
+STORE, 47357693575168, 47357693636607,
+ERASE, 47357693575168, 47357693575168,
+STORE, 47357693575168, 47357693636607,
+STORE, 47357693661184, 47357693685759,
+STORE, 47357693636608, 47357693661183,
+ERASE, 47357693636608, 47357693636608,
+STORE, 47357693636608, 47357693685759,
+ERASE, 47357693636608, 47357693636608,
+STORE, 47357693636608, 47357693661183,
+STORE, 47357693661184, 47357693685759,
+STORE, 47357693669376, 47357693685759,
+STORE, 47357693661184, 47357693669375,
+ERASE, 47357693661184, 47357693661184,
+STORE, 47357693661184, 47357693669375,
+ERASE, 47357693669376, 47357693669376,
+STORE, 47357693669376, 47357693685759,
+STORE, 47357693685760, 47357693706239,
+ERASE, 47357693685760, 47357693685760,
+STORE, 47357693685760, 47357693689855,
+STORE, 47357693689856, 47357693706239,
+STORE, 47357693693952, 47357693706239,
+STORE, 47357693689856, 47357693693951,
+ERASE, 47357693689856, 47357693689856,
+STORE, 47357693689856, 47357693693951,
+STORE, 47357693698048, 47357693706239,
+STORE, 47357693693952, 47357693698047,
+ERASE, 47357693693952, 47357693693952,
+STORE, 47357693693952, 47357693706239,
+ERASE, 47357693693952, 47357693693952,
+STORE, 47357693693952, 47357693698047,
+STORE, 47357693698048, 47357693706239,
+ERASE, 47357693698048, 47357693698048,
+STORE, 47357693698048, 47357693706239,
+STORE, 47357693706240, 47357693714431,
+ERASE, 47357693509632, 47357693509632,
+STORE, 47357693509632, 47357693526015,
+STORE, 47357693526016, 47357693534207,
+ERASE, 47357693698048, 47357693698048,
+STORE, 47357693698048, 47357693702143,
+STORE, 47357693702144, 47357693706239,
+ERASE, 47357693661184, 47357693661184,
+STORE, 47357693661184, 47357693665279,
+STORE, 47357693665280, 47357693669375,
+ERASE, 47357691490304, 47357691490304,
+STORE, 47357691490304, 47357691686911,
+STORE, 47357691686912, 47357691695103,
+ERASE, 47357688651776, 47357688651776,
+STORE, 47357688651776, 47357688655871,
+STORE, 47357688655872, 47357688659967,
+ERASE, 94240509046784, 94240509046784,
+STORE, 94240509046784, 94240509054975,
+STORE, 94240509054976, 94240509059071,
+ERASE, 140275106676736, 140275106676736,
+STORE, 140275106676736, 140275106680831,
+STORE, 140275106680832, 140275106684927,
+ERASE, 47357688479744, 47357688479744,
+STORE, 94240518361088, 94240518496255,
+STORE, 140737488347136, 140737488351231,
+STORE, 140732688277504, 140737488351231,
+ERASE, 140732688277504, 140732688277504,
+STORE, 140732688277504, 140732688281599,
+STORE, 94629171351552, 94629172064255,
+ERASE, 94629171351552, 94629171351552,
+STORE, 94629171351552, 94629171400703,
+STORE, 94629171400704, 94629172064255,
+ERASE, 94629171400704, 94629171400704,
+STORE, 94629171400704, 94629171945471,
+STORE, 94629171945472, 94629172043775,
+STORE, 94629172043776, 94629172064255,
+STORE, 139770707644416, 139770707816447,
+ERASE, 139770707644416, 139770707644416,
+STORE, 139770707644416, 139770707648511,
+STORE, 139770707648512, 139770707816447,
+ERASE, 139770707648512, 139770707648512,
+STORE, 139770707648512, 139770707771391,
+STORE, 139770707771392, 139770707804159,
+STORE, 139770707804160, 139770707812351,
+STORE, 139770707812352, 139770707816447,
+STORE, 140732689121280, 140732689125375,
+STORE, 140732689108992, 140732689121279,
+STORE, 47862087352320, 47862087360511,
+STORE, 47862087360512, 47862087368703,
+STORE, 47862087368704, 47862087475199,
+STORE, 47862087385088, 47862087475199,
+STORE, 47862087368704, 47862087385087,
+ERASE, 47862087385088, 47862087385088,
+STORE, 47862087385088, 47862087458815,
+STORE, 47862087458816, 47862087475199,
+STORE, 47862087438336, 47862087458815,
+STORE, 47862087385088, 47862087438335,
+ERASE, 47862087385088, 47862087385088,
+STORE, 47862087385088, 47862087438335,
+STORE, 47862087454720, 47862087458815,
+STORE, 47862087438336, 47862087454719,
+ERASE, 47862087438336, 47862087438336,
+STORE, 47862087438336, 47862087454719,
+STORE, 47862087467008, 47862087475199,
+STORE, 47862087458816, 47862087467007,
+ERASE, 47862087458816, 47862087458816,
+STORE, 47862087458816, 47862087467007,
+ERASE, 47862087467008, 47862087467008,
+STORE, 47862087467008, 47862087475199,
+STORE, 47862087475200, 47862089314303,
+STORE, 47862087614464, 47862089314303,
+STORE, 47862087475200, 47862087614463,
+ERASE, 47862087614464, 47862087614464,
+STORE, 47862087614464, 47862089273343,
+STORE, 47862089273344, 47862089314303,
+STORE, 47862088957952, 47862089273343,
+STORE, 47862087614464, 47862088957951,
+ERASE, 47862087614464, 47862087614464,
+STORE, 47862087614464, 47862088957951,
+STORE, 47862089269248, 47862089273343,
+STORE, 47862088957952, 47862089269247,
+ERASE, 47862088957952, 47862088957952,
+STORE, 47862088957952, 47862089269247,
+STORE, 47862089297920, 47862089314303,
+STORE, 47862089273344, 47862089297919,
+ERASE, 47862089273344, 47862089273344,
+STORE, 47862089273344, 47862089297919,
+ERASE, 47862089297920, 47862089297920,
+STORE, 47862089297920, 47862089314303,
+STORE, 47862089297920, 47862089326591,
+ERASE, 47862089273344, 47862089273344,
+STORE, 47862089273344, 47862089289727,
+STORE, 47862089289728, 47862089297919,
+ERASE, 47862087458816, 47862087458816,
+STORE, 47862087458816, 47862087462911,
+STORE, 47862087462912, 47862087467007,
+ERASE, 94629172043776, 94629172043776,
+STORE, 94629172043776, 94629172060159,
+STORE, 94629172060160, 94629172064255,
+ERASE, 139770707804160, 139770707804160,
+STORE, 139770707804160, 139770707808255,
+STORE, 139770707808256, 139770707812351,
+ERASE, 47862087352320, 47862087352320,
+STORE, 94629197533184, 94629197668351,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727540711424, 140737488351231,
+ERASE, 140727540711424, 140727540711424,
+STORE, 140727540711424, 140727540715519,
+STORE, 94299865313280, 94299866025983,
+ERASE, 94299865313280, 94299865313280,
+STORE, 94299865313280, 94299865362431,
+STORE, 94299865362432, 94299866025983,
+ERASE, 94299865362432, 94299865362432,
+STORE, 94299865362432, 94299865907199,
+STORE, 94299865907200, 94299866005503,
+STORE, 94299866005504, 94299866025983,
+STORE, 140680268763136, 140680268935167,
+ERASE, 140680268763136, 140680268763136,
+STORE, 140680268763136, 140680268767231,
+STORE, 140680268767232, 140680268935167,
+ERASE, 140680268767232, 140680268767232,
+STORE, 140680268767232, 140680268890111,
+STORE, 140680268890112, 140680268922879,
+STORE, 140680268922880, 140680268931071,
+STORE, 140680268931072, 140680268935167,
+STORE, 140727541424128, 140727541428223,
+STORE, 140727541411840, 140727541424127,
+STORE, 46952526233600, 46952526241791,
+STORE, 46952526241792, 46952526249983,
+STORE, 46952526249984, 46952526356479,
+STORE, 46952526266368, 46952526356479,
+STORE, 46952526249984, 46952526266367,
+ERASE, 46952526266368, 46952526266368,
+STORE, 46952526266368, 46952526340095,
+STORE, 46952526340096, 46952526356479,
+STORE, 46952526319616, 46952526340095,
+STORE, 46952526266368, 46952526319615,
+ERASE, 46952526266368, 46952526266368,
+STORE, 46952526266368, 46952526319615,
+STORE, 46952526336000, 46952526340095,
+STORE, 46952526319616, 46952526335999,
+ERASE, 46952526319616, 46952526319616,
+STORE, 46952526319616, 46952526335999,
+STORE, 46952526348288, 46952526356479,
+STORE, 46952526340096, 46952526348287,
+ERASE, 46952526340096, 46952526340096,
+STORE, 46952526340096, 46952526348287,
+ERASE, 46952526348288, 46952526348288,
+STORE, 46952526348288, 46952526356479,
+STORE, 46952526356480, 46952528195583,
+STORE, 46952526495744, 46952528195583,
+STORE, 46952526356480, 46952526495743,
+ERASE, 46952526495744, 46952526495744,
+STORE, 46952526495744, 46952528154623,
+STORE, 46952528154624, 46952528195583,
+STORE, 46952527839232, 46952528154623,
+STORE, 46952526495744, 46952527839231,
+ERASE, 46952526495744, 46952526495744,
+STORE, 46952526495744, 46952527839231,
+STORE, 46952528150528, 46952528154623,
+STORE, 46952527839232, 46952528150527,
+ERASE, 46952527839232, 46952527839232,
+STORE, 46952527839232, 46952528150527,
+STORE, 46952528179200, 46952528195583,
+STORE, 46952528154624, 46952528179199,
+ERASE, 46952528154624, 46952528154624,
+STORE, 46952528154624, 46952528179199,
+ERASE, 46952528179200, 46952528179200,
+STORE, 46952528179200, 46952528195583,
+STORE, 46952528179200, 46952528207871,
+ERASE, 46952528154624, 46952528154624,
+STORE, 46952528154624, 46952528171007,
+STORE, 46952528171008, 46952528179199,
+ERASE, 46952526340096, 46952526340096,
+STORE, 46952526340096, 46952526344191,
+STORE, 46952526344192, 46952526348287,
+ERASE, 94299866005504, 94299866005504,
+STORE, 94299866005504, 94299866021887,
+STORE, 94299866021888, 94299866025983,
+ERASE, 140680268922880, 140680268922880,
+STORE, 140680268922880, 140680268926975,
+STORE, 140680268926976, 140680268931071,
+ERASE, 46952526233600, 46952526233600,
+STORE, 140737488347136, 140737488351231,
+STORE, 140722874793984, 140737488351231,
+ERASE, 140722874793984, 140722874793984,
+STORE, 140722874793984, 140722874798079,
+STORE, 94448916213760, 94448916926463,
+ERASE, 94448916213760, 94448916213760,
+STORE, 94448916213760, 94448916262911,
+STORE, 94448916262912, 94448916926463,
+ERASE, 94448916262912, 94448916262912,
+STORE, 94448916262912, 94448916807679,
+STORE, 94448916807680, 94448916905983,
+STORE, 94448916905984, 94448916926463,
+STORE, 140389117046784, 140389117218815,
+ERASE, 140389117046784, 140389117046784,
+STORE, 140389117046784, 140389117050879,
+STORE, 140389117050880, 140389117218815,
+ERASE, 140389117050880, 140389117050880,
+STORE, 140389117050880, 140389117173759,
+STORE, 140389117173760, 140389117206527,
+STORE, 140389117206528, 140389117214719,
+STORE, 140389117214720, 140389117218815,
+STORE, 140722875297792, 140722875301887,
+STORE, 140722875285504, 140722875297791,
+STORE, 47243677949952, 47243677958143,
+STORE, 47243677958144, 47243677966335,
+STORE, 47243677966336, 47243678072831,
+STORE, 47243677982720, 47243678072831,
+STORE, 47243677966336, 47243677982719,
+ERASE, 47243677982720, 47243677982720,
+STORE, 47243677982720, 47243678056447,
+STORE, 47243678056448, 47243678072831,
+STORE, 47243678035968, 47243678056447,
+STORE, 47243677982720, 47243678035967,
+ERASE, 47243677982720, 47243677982720,
+STORE, 47243677982720, 47243678035967,
+STORE, 47243678052352, 47243678056447,
+STORE, 47243678035968, 47243678052351,
+ERASE, 47243678035968, 47243678035968,
+STORE, 47243678035968, 47243678052351,
+STORE, 47243678064640, 47243678072831,
+STORE, 47243678056448, 47243678064639,
+ERASE, 47243678056448, 47243678056448,
+STORE, 47243678056448, 47243678064639,
+ERASE, 47243678064640, 47243678064640,
+STORE, 47243678064640, 47243678072831,
+STORE, 47243678072832, 47243679911935,
+STORE, 47243678212096, 47243679911935,
+STORE, 47243678072832, 47243678212095,
+ERASE, 47243678212096, 47243678212096,
+STORE, 47243678212096, 47243679870975,
+STORE, 47243679870976, 47243679911935,
+STORE, 47243679555584, 47243679870975,
+STORE, 47243678212096, 47243679555583,
+ERASE, 47243678212096, 47243678212096,
+STORE, 47243678212096, 47243679555583,
+STORE, 47243679866880, 47243679870975,
+STORE, 47243679555584, 47243679866879,
+ERASE, 47243679555584, 47243679555584,
+STORE, 47243679555584, 47243679866879,
+STORE, 47243679895552, 47243679911935,
+STORE, 47243679870976, 47243679895551,
+ERASE, 47243679870976, 47243679870976,
+STORE, 47243679870976, 47243679895551,
+ERASE, 47243679895552, 47243679895552,
+STORE, 47243679895552, 47243679911935,
+STORE, 47243679895552, 47243679924223,
+ERASE, 47243679870976, 47243679870976,
+STORE, 47243679870976, 47243679887359,
+STORE, 47243679887360, 47243679895551,
+ERASE, 47243678056448, 47243678056448,
+STORE, 47243678056448, 47243678060543,
+STORE, 47243678060544, 47243678064639,
+ERASE, 94448916905984, 94448916905984,
+STORE, 94448916905984, 94448916922367,
+STORE, 94448916922368, 94448916926463,
+ERASE, 140389117206528, 140389117206528,
+STORE, 140389117206528, 140389117210623,
+STORE, 140389117210624, 140389117214719,
+ERASE, 47243677949952, 47243677949952,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733068505088, 140737488351231,
+ERASE, 140733068505088, 140733068505088,
+STORE, 140733068505088, 140733068509183,
+STORE, 94207145750528, 94207146463231,
+ERASE, 94207145750528, 94207145750528,
+STORE, 94207145750528, 94207145799679,
+STORE, 94207145799680, 94207146463231,
+ERASE, 94207145799680, 94207145799680,
+STORE, 94207145799680, 94207146344447,
+STORE, 94207146344448, 94207146442751,
+STORE, 94207146442752, 94207146463231,
+STORE, 140684504911872, 140684505083903,
+ERASE, 140684504911872, 140684504911872,
+STORE, 140684504911872, 140684504915967,
+STORE, 140684504915968, 140684505083903,
+ERASE, 140684504915968, 140684504915968,
+STORE, 140684504915968, 140684505038847,
+STORE, 140684505038848, 140684505071615,
+STORE, 140684505071616, 140684505079807,
+STORE, 140684505079808, 140684505083903,
+STORE, 140733068607488, 140733068611583,
+STORE, 140733068595200, 140733068607487,
+STORE, 46948290084864, 46948290093055,
+STORE, 46948290093056, 46948290101247,
+STORE, 46948290101248, 46948290207743,
+STORE, 46948290117632, 46948290207743,
+STORE, 46948290101248, 46948290117631,
+ERASE, 46948290117632, 46948290117632,
+STORE, 46948290117632, 46948290191359,
+STORE, 46948290191360, 46948290207743,
+STORE, 46948290170880, 46948290191359,
+STORE, 46948290117632, 46948290170879,
+ERASE, 46948290117632, 46948290117632,
+STORE, 46948290117632, 46948290170879,
+STORE, 46948290187264, 46948290191359,
+STORE, 46948290170880, 46948290187263,
+ERASE, 46948290170880, 46948290170880,
+STORE, 46948290170880, 46948290187263,
+STORE, 46948290199552, 46948290207743,
+STORE, 46948290191360, 46948290199551,
+ERASE, 46948290191360, 46948290191360,
+STORE, 46948290191360, 46948290199551,
+ERASE, 46948290199552, 46948290199552,
+STORE, 46948290199552, 46948290207743,
+STORE, 46948290207744, 46948292046847,
+STORE, 46948290347008, 46948292046847,
+STORE, 46948290207744, 46948290347007,
+ERASE, 46948290347008, 46948290347008,
+STORE, 46948290347008, 46948292005887,
+STORE, 46948292005888, 46948292046847,
+STORE, 46948291690496, 46948292005887,
+STORE, 46948290347008, 46948291690495,
+ERASE, 46948290347008, 46948290347008,
+STORE, 46948290347008, 46948291690495,
+STORE, 46948292001792, 46948292005887,
+STORE, 46948291690496, 46948292001791,
+ERASE, 46948291690496, 46948291690496,
+STORE, 46948291690496, 46948292001791,
+STORE, 46948292030464, 46948292046847,
+STORE, 46948292005888, 46948292030463,
+ERASE, 46948292005888, 46948292005888,
+STORE, 46948292005888, 46948292030463,
+ERASE, 46948292030464, 46948292030464,
+STORE, 46948292030464, 46948292046847,
+STORE, 46948292030464, 46948292059135,
+ERASE, 46948292005888, 46948292005888,
+STORE, 46948292005888, 46948292022271,
+STORE, 46948292022272, 46948292030463,
+ERASE, 46948290191360, 46948290191360,
+STORE, 46948290191360, 46948290195455,
+STORE, 46948290195456, 46948290199551,
+ERASE, 94207146442752, 94207146442752,
+STORE, 94207146442752, 94207146459135,
+STORE, 94207146459136, 94207146463231,
+ERASE, 140684505071616, 140684505071616,
+STORE, 140684505071616, 140684505075711,
+STORE, 140684505075712, 140684505079807,
+ERASE, 46948290084864, 46948290084864,
+STORE, 140737488347136, 140737488351231,
+STORE, 140726367158272, 140737488351231,
+ERASE, 140726367158272, 140726367158272,
+STORE, 140726367158272, 140726367162367,
+STORE, 94436124106752, 94436124819455,
+ERASE, 94436124106752, 94436124106752,
+STORE, 94436124106752, 94436124155903,
+STORE, 94436124155904, 94436124819455,
+ERASE, 94436124155904, 94436124155904,
+STORE, 94436124155904, 94436124700671,
+STORE, 94436124700672, 94436124798975,
+STORE, 94436124798976, 94436124819455,
+STORE, 140049025044480, 140049025216511,
+ERASE, 140049025044480, 140049025044480,
+STORE, 140049025044480, 140049025048575,
+STORE, 140049025048576, 140049025216511,
+ERASE, 140049025048576, 140049025048576,
+STORE, 140049025048576, 140049025171455,
+STORE, 140049025171456, 140049025204223,
+STORE, 140049025204224, 140049025212415,
+STORE, 140049025212416, 140049025216511,
+STORE, 140726367256576, 140726367260671,
+STORE, 140726367244288, 140726367256575,
+STORE, 47583769952256, 47583769960447,
+STORE, 47583769960448, 47583769968639,
+STORE, 47583769968640, 47583770075135,
+STORE, 47583769985024, 47583770075135,
+STORE, 47583769968640, 47583769985023,
+ERASE, 47583769985024, 47583769985024,
+STORE, 47583769985024, 47583770058751,
+STORE, 47583770058752, 47583770075135,
+STORE, 47583770038272, 47583770058751,
+STORE, 47583769985024, 47583770038271,
+ERASE, 47583769985024, 47583769985024,
+STORE, 47583769985024, 47583770038271,
+STORE, 47583770054656, 47583770058751,
+STORE, 47583770038272, 47583770054655,
+ERASE, 47583770038272, 47583770038272,
+STORE, 47583770038272, 47583770054655,
+STORE, 47583770066944, 47583770075135,
+STORE, 47583770058752, 47583770066943,
+ERASE, 47583770058752, 47583770058752,
+STORE, 47583770058752, 47583770066943,
+ERASE, 47583770066944, 47583770066944,
+STORE, 47583770066944, 47583770075135,
+STORE, 47583770075136, 47583771914239,
+STORE, 47583770214400, 47583771914239,
+STORE, 47583770075136, 47583770214399,
+ERASE, 47583770214400, 47583770214400,
+STORE, 47583770214400, 47583771873279,
+STORE, 47583771873280, 47583771914239,
+STORE, 47583771557888, 47583771873279,
+STORE, 47583770214400, 47583771557887,
+ERASE, 47583770214400, 47583770214400,
+STORE, 47583770214400, 47583771557887,
+STORE, 47583771869184, 47583771873279,
+STORE, 47583771557888, 47583771869183,
+ERASE, 47583771557888, 47583771557888,
+STORE, 47583771557888, 47583771869183,
+STORE, 47583771897856, 47583771914239,
+STORE, 47583771873280, 47583771897855,
+ERASE, 47583771873280, 47583771873280,
+STORE, 47583771873280, 47583771897855,
+ERASE, 47583771897856, 47583771897856,
+STORE, 47583771897856, 47583771914239,
+STORE, 47583771897856, 47583771926527,
+ERASE, 47583771873280, 47583771873280,
+STORE, 47583771873280, 47583771889663,
+STORE, 47583771889664, 47583771897855,
+ERASE, 47583770058752, 47583770058752,
+STORE, 47583770058752, 47583770062847,
+STORE, 47583770062848, 47583770066943,
+ERASE, 94436124798976, 94436124798976,
+STORE, 94436124798976, 94436124815359,
+STORE, 94436124815360, 94436124819455,
+ERASE, 140049025204224, 140049025204224,
+STORE, 140049025204224, 140049025208319,
+STORE, 140049025208320, 140049025212415,
+ERASE, 47583769952256, 47583769952256,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727116099584, 140737488351231,
+ERASE, 140727116099584, 140727116099584,
+STORE, 140727116099584, 140727116103679,
+STORE, 94166319734784, 94166320447487,
+ERASE, 94166319734784, 94166319734784,
+STORE, 94166319734784, 94166319783935,
+STORE, 94166319783936, 94166320447487,
+ERASE, 94166319783936, 94166319783936,
+STORE, 94166319783936, 94166320328703,
+STORE, 94166320328704, 94166320427007,
+STORE, 94166320427008, 94166320447487,
+STORE, 139976559542272, 139976559714303,
+ERASE, 139976559542272, 139976559542272,
+STORE, 139976559542272, 139976559546367,
+STORE, 139976559546368, 139976559714303,
+ERASE, 139976559546368, 139976559546368,
+STORE, 139976559546368, 139976559669247,
+STORE, 139976559669248, 139976559702015,
+STORE, 139976559702016, 139976559710207,
+STORE, 139976559710208, 139976559714303,
+STORE, 140727116222464, 140727116226559,
+STORE, 140727116210176, 140727116222463,
+STORE, 47656235454464, 47656235462655,
+STORE, 47656235462656, 47656235470847,
+STORE, 47656235470848, 47656235577343,
+STORE, 47656235487232, 47656235577343,
+STORE, 47656235470848, 47656235487231,
+ERASE, 47656235487232, 47656235487232,
+STORE, 47656235487232, 47656235560959,
+STORE, 47656235560960, 47656235577343,
+STORE, 47656235540480, 47656235560959,
+STORE, 47656235487232, 47656235540479,
+ERASE, 47656235487232, 47656235487232,
+STORE, 47656235487232, 47656235540479,
+STORE, 47656235556864, 47656235560959,
+STORE, 47656235540480, 47656235556863,
+ERASE, 47656235540480, 47656235540480,
+STORE, 47656235540480, 47656235556863,
+STORE, 47656235569152, 47656235577343,
+STORE, 47656235560960, 47656235569151,
+ERASE, 47656235560960, 47656235560960,
+STORE, 47656235560960, 47656235569151,
+ERASE, 47656235569152, 47656235569152,
+STORE, 47656235569152, 47656235577343,
+STORE, 47656235577344, 47656237416447,
+STORE, 47656235716608, 47656237416447,
+STORE, 47656235577344, 47656235716607,
+ERASE, 47656235716608, 47656235716608,
+STORE, 47656235716608, 47656237375487,
+STORE, 47656237375488, 47656237416447,
+STORE, 47656237060096, 47656237375487,
+STORE, 47656235716608, 47656237060095,
+ERASE, 47656235716608, 47656235716608,
+STORE, 47656235716608, 47656237060095,
+STORE, 47656237371392, 47656237375487,
+STORE, 47656237060096, 47656237371391,
+ERASE, 47656237060096, 47656237060096,
+STORE, 47656237060096, 47656237371391,
+STORE, 47656237400064, 47656237416447,
+STORE, 47656237375488, 47656237400063,
+ERASE, 47656237375488, 47656237375488,
+STORE, 47656237375488, 47656237400063,
+ERASE, 47656237400064, 47656237400064,
+STORE, 47656237400064, 47656237416447,
+STORE, 47656237400064, 47656237428735,
+ERASE, 47656237375488, 47656237375488,
+STORE, 47656237375488, 47656237391871,
+STORE, 47656237391872, 47656237400063,
+ERASE, 47656235560960, 47656235560960,
+STORE, 47656235560960, 47656235565055,
+STORE, 47656235565056, 47656235569151,
+ERASE, 94166320427008, 94166320427008,
+STORE, 94166320427008, 94166320443391,
+STORE, 94166320443392, 94166320447487,
+ERASE, 139976559702016, 139976559702016,
+STORE, 139976559702016, 139976559706111,
+STORE, 139976559706112, 139976559710207,
+ERASE, 47656235454464, 47656235454464,
+STORE, 94166332153856, 94166332289023,
+STORE, 140737488347136, 140737488351231,
+STORE, 140726412816384, 140737488351231,
+ERASE, 140726412816384, 140726412816384,
+STORE, 140726412816384, 140726412820479,
+STORE, 94094884507648, 94094885220351,
+ERASE, 94094884507648, 94094884507648,
+STORE, 94094884507648, 94094884556799,
+STORE, 94094884556800, 94094885220351,
+ERASE, 94094884556800, 94094884556800,
+STORE, 94094884556800, 94094885101567,
+STORE, 94094885101568, 94094885199871,
+STORE, 94094885199872, 94094885220351,
+STORE, 139773773938688, 139773774110719,
+ERASE, 139773773938688, 139773773938688,
+STORE, 139773773938688, 139773773942783,
+STORE, 139773773942784, 139773774110719,
+ERASE, 139773773942784, 139773773942784,
+STORE, 139773773942784, 139773774065663,
+STORE, 139773774065664, 139773774098431,
+STORE, 139773774098432, 139773774106623,
+STORE, 139773774106624, 139773774110719,
+STORE, 140726412963840, 140726412967935,
+STORE, 140726412951552, 140726412963839,
+STORE, 47859021058048, 47859021066239,
+STORE, 47859021066240, 47859021074431,
+STORE, 47859021074432, 47859021180927,
+STORE, 47859021090816, 47859021180927,
+STORE, 47859021074432, 47859021090815,
+ERASE, 47859021090816, 47859021090816,
+STORE, 47859021090816, 47859021164543,
+STORE, 47859021164544, 47859021180927,
+STORE, 47859021144064, 47859021164543,
+STORE, 47859021090816, 47859021144063,
+ERASE, 47859021090816, 47859021090816,
+STORE, 47859021090816, 47859021144063,
+STORE, 47859021160448, 47859021164543,
+STORE, 47859021144064, 47859021160447,
+ERASE, 47859021144064, 47859021144064,
+STORE, 47859021144064, 47859021160447,
+STORE, 47859021172736, 47859021180927,
+STORE, 47859021164544, 47859021172735,
+ERASE, 47859021164544, 47859021164544,
+STORE, 47859021164544, 47859021172735,
+ERASE, 47859021172736, 47859021172736,
+STORE, 47859021172736, 47859021180927,
+STORE, 47859021180928, 47859023020031,
+STORE, 47859021320192, 47859023020031,
+STORE, 47859021180928, 47859021320191,
+ERASE, 47859021320192, 47859021320192,
+STORE, 47859021320192, 47859022979071,
+STORE, 47859022979072, 47859023020031,
+STORE, 47859022663680, 47859022979071,
+STORE, 47859021320192, 47859022663679,
+ERASE, 47859021320192, 47859021320192,
+STORE, 47859021320192, 47859022663679,
+STORE, 47859022974976, 47859022979071,
+STORE, 47859022663680, 47859022974975,
+ERASE, 47859022663680, 47859022663680,
+STORE, 47859022663680, 47859022974975,
+STORE, 47859023003648, 47859023020031,
+STORE, 47859022979072, 47859023003647,
+ERASE, 47859022979072, 47859022979072,
+STORE, 47859022979072, 47859023003647,
+ERASE, 47859023003648, 47859023003648,
+STORE, 47859023003648, 47859023020031,
+STORE, 47859023003648, 47859023032319,
+ERASE, 47859022979072, 47859022979072,
+STORE, 47859022979072, 47859022995455,
+STORE, 47859022995456, 47859023003647,
+ERASE, 47859021164544, 47859021164544,
+STORE, 47859021164544, 47859021168639,
+STORE, 47859021168640, 47859021172735,
+ERASE, 94094885199872, 94094885199872,
+STORE, 94094885199872, 94094885216255,
+STORE, 94094885216256, 94094885220351,
+ERASE, 139773774098432, 139773774098432,
+STORE, 139773774098432, 139773774102527,
+STORE, 139773774102528, 139773774106623,
+ERASE, 47859021058048, 47859021058048,
+STORE, 94094901108736, 94094901243903,
+STORE, 140737488347136, 140737488351231,
+STORE, 140736567963648, 140737488351231,
+ERASE, 140736567963648, 140736567963648,
+STORE, 140736567963648, 140736567967743,
+STORE, 94924425748480, 94924426461183,
+ERASE, 94924425748480, 94924425748480,
+STORE, 94924425748480, 94924425797631,
+STORE, 94924425797632, 94924426461183,
+ERASE, 94924425797632, 94924425797632,
+STORE, 94924425797632, 94924426342399,
+STORE, 94924426342400, 94924426440703,
+STORE, 94924426440704, 94924426461183,
+STORE, 140042126319616, 140042126491647,
+ERASE, 140042126319616, 140042126319616,
+STORE, 140042126319616, 140042126323711,
+STORE, 140042126323712, 140042126491647,
+ERASE, 140042126323712, 140042126323712,
+STORE, 140042126323712, 140042126446591,
+STORE, 140042126446592, 140042126479359,
+STORE, 140042126479360, 140042126487551,
+STORE, 140042126487552, 140042126491647,
+STORE, 140736568672256, 140736568676351,
+STORE, 140736568659968, 140736568672255,
+STORE, 47590668677120, 47590668685311,
+STORE, 47590668685312, 47590668693503,
+STORE, 47590668693504, 47590668799999,
+STORE, 47590668709888, 47590668799999,
+STORE, 47590668693504, 47590668709887,
+ERASE, 47590668709888, 47590668709888,
+STORE, 47590668709888, 47590668783615,
+STORE, 47590668783616, 47590668799999,
+STORE, 47590668763136, 47590668783615,
+STORE, 47590668709888, 47590668763135,
+ERASE, 47590668709888, 47590668709888,
+STORE, 47590668709888, 47590668763135,
+STORE, 47590668779520, 47590668783615,
+STORE, 47590668763136, 47590668779519,
+ERASE, 47590668763136, 47590668763136,
+STORE, 47590668763136, 47590668779519,
+STORE, 47590668791808, 47590668799999,
+STORE, 47590668783616, 47590668791807,
+ERASE, 47590668783616, 47590668783616,
+STORE, 47590668783616, 47590668791807,
+ERASE, 47590668791808, 47590668791808,
+STORE, 47590668791808, 47590668799999,
+STORE, 47590668800000, 47590670639103,
+STORE, 47590668939264, 47590670639103,
+STORE, 47590668800000, 47590668939263,
+ERASE, 47590668939264, 47590668939264,
+STORE, 47590668939264, 47590670598143,
+STORE, 47590670598144, 47590670639103,
+STORE, 47590670282752, 47590670598143,
+STORE, 47590668939264, 47590670282751,
+ERASE, 47590668939264, 47590668939264,
+STORE, 47590668939264, 47590670282751,
+STORE, 47590670594048, 47590670598143,
+STORE, 47590670282752, 47590670594047,
+ERASE, 47590670282752, 47590670282752,
+STORE, 47590670282752, 47590670594047,
+STORE, 47590670622720, 47590670639103,
+STORE, 47590670598144, 47590670622719,
+ERASE, 47590670598144, 47590670598144,
+STORE, 47590670598144, 47590670622719,
+ERASE, 47590670622720, 47590670622720,
+STORE, 47590670622720, 47590670639103,
+STORE, 47590670622720, 47590670651391,
+ERASE, 47590670598144, 47590670598144,
+STORE, 47590670598144, 47590670614527,
+STORE, 47590670614528, 47590670622719,
+ERASE, 47590668783616, 47590668783616,
+STORE, 47590668783616, 47590668787711,
+STORE, 47590668787712, 47590668791807,
+ERASE, 94924426440704, 94924426440704,
+STORE, 94924426440704, 94924426457087,
+STORE, 94924426457088, 94924426461183,
+ERASE, 140042126479360, 140042126479360,
+STORE, 140042126479360, 140042126483455,
+STORE, 140042126483456, 140042126487551,
+ERASE, 47590668677120, 47590668677120,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733281439744, 140737488351231,
+ERASE, 140733281439744, 140733281439744,
+STORE, 140733281439744, 140733281443839,
+STORE, 94490667069440, 94490667782143,
+ERASE, 94490667069440, 94490667069440,
+STORE, 94490667069440, 94490667118591,
+STORE, 94490667118592, 94490667782143,
+ERASE, 94490667118592, 94490667118592,
+STORE, 94490667118592, 94490667663359,
+STORE, 94490667663360, 94490667761663,
+STORE, 94490667761664, 94490667782143,
+STORE, 139878215118848, 139878215290879,
+ERASE, 139878215118848, 139878215118848,
+STORE, 139878215118848, 139878215122943,
+STORE, 139878215122944, 139878215290879,
+ERASE, 139878215122944, 139878215122944,
+STORE, 139878215122944, 139878215245823,
+STORE, 139878215245824, 139878215278591,
+STORE, 139878215278592, 139878215286783,
+STORE, 139878215286784, 139878215290879,
+STORE, 140733281464320, 140733281468415,
+STORE, 140733281452032, 140733281464319,
+STORE, 47754579877888, 47754579886079,
+STORE, 47754579886080, 47754579894271,
+STORE, 47754579894272, 47754580000767,
+STORE, 47754579910656, 47754580000767,
+STORE, 47754579894272, 47754579910655,
+ERASE, 47754579910656, 47754579910656,
+STORE, 47754579910656, 47754579984383,
+STORE, 47754579984384, 47754580000767,
+STORE, 47754579963904, 47754579984383,
+STORE, 47754579910656, 47754579963903,
+ERASE, 47754579910656, 47754579910656,
+STORE, 47754579910656, 47754579963903,
+STORE, 47754579980288, 47754579984383,
+STORE, 47754579963904, 47754579980287,
+ERASE, 47754579963904, 47754579963904,
+STORE, 47754579963904, 47754579980287,
+STORE, 47754579992576, 47754580000767,
+STORE, 47754579984384, 47754579992575,
+ERASE, 47754579984384, 47754579984384,
+STORE, 47754579984384, 47754579992575,
+ERASE, 47754579992576, 47754579992576,
+STORE, 47754579992576, 47754580000767,
+STORE, 47754580000768, 47754581839871,
+STORE, 47754580140032, 47754581839871,
+STORE, 47754580000768, 47754580140031,
+ERASE, 47754580140032, 47754580140032,
+STORE, 47754580140032, 47754581798911,
+STORE, 47754581798912, 47754581839871,
+STORE, 47754581483520, 47754581798911,
+STORE, 47754580140032, 47754581483519,
+ERASE, 47754580140032, 47754580140032,
+STORE, 47754580140032, 47754581483519,
+STORE, 47754581794816, 47754581798911,
+STORE, 47754581483520, 47754581794815,
+ERASE, 47754581483520, 47754581483520,
+STORE, 47754581483520, 47754581794815,
+STORE, 47754581823488, 47754581839871,
+STORE, 47754581798912, 47754581823487,
+ERASE, 47754581798912, 47754581798912,
+STORE, 47754581798912, 47754581823487,
+ERASE, 47754581823488, 47754581823488,
+STORE, 47754581823488, 47754581839871,
+STORE, 47754581823488, 47754581852159,
+ERASE, 47754581798912, 47754581798912,
+STORE, 47754581798912, 47754581815295,
+STORE, 47754581815296, 47754581823487,
+ERASE, 47754579984384, 47754579984384,
+STORE, 47754579984384, 47754579988479,
+STORE, 47754579988480, 47754579992575,
+ERASE, 94490667761664, 94490667761664,
+STORE, 94490667761664, 94490667778047,
+STORE, 94490667778048, 94490667782143,
+ERASE, 139878215278592, 139878215278592,
+STORE, 139878215278592, 139878215282687,
+STORE, 139878215282688, 139878215286783,
+ERASE, 47754579877888, 47754579877888,
+STORE, 94490669649920, 94490669785087,
+STORE, 140737488347136, 140737488351231,
+STORE, 140735382188032, 140737488351231,
+ERASE, 140735382188032, 140735382188032,
+STORE, 140735382188032, 140735382192127,
+STORE, 94150181302272, 94150182014975,
+ERASE, 94150181302272, 94150181302272,
+STORE, 94150181302272, 94150181351423,
+STORE, 94150181351424, 94150182014975,
+ERASE, 94150181351424, 94150181351424,
+STORE, 94150181351424, 94150181896191,
+STORE, 94150181896192, 94150181994495,
+STORE, 94150181994496, 94150182014975,
+STORE, 139679752458240, 139679752630271,
+ERASE, 139679752458240, 139679752458240,
+STORE, 139679752458240, 139679752462335,
+STORE, 139679752462336, 139679752630271,
+ERASE, 139679752462336, 139679752462336,
+STORE, 139679752462336, 139679752585215,
+STORE, 139679752585216, 139679752617983,
+STORE, 139679752617984, 139679752626175,
+STORE, 139679752626176, 139679752630271,
+STORE, 140735382536192, 140735382540287,
+STORE, 140735382523904, 140735382536191,
+STORE, 47953042538496, 47953042546687,
+STORE, 47953042546688, 47953042554879,
+STORE, 47953042554880, 47953042661375,
+STORE, 47953042571264, 47953042661375,
+STORE, 47953042554880, 47953042571263,
+ERASE, 47953042571264, 47953042571264,
+STORE, 47953042571264, 47953042644991,
+STORE, 47953042644992, 47953042661375,
+STORE, 47953042624512, 47953042644991,
+STORE, 47953042571264, 47953042624511,
+ERASE, 47953042571264, 47953042571264,
+STORE, 47953042571264, 47953042624511,
+STORE, 47953042640896, 47953042644991,
+STORE, 47953042624512, 47953042640895,
+ERASE, 47953042624512, 47953042624512,
+STORE, 47953042624512, 47953042640895,
+STORE, 47953042653184, 47953042661375,
+STORE, 47953042644992, 47953042653183,
+ERASE, 47953042644992, 47953042644992,
+STORE, 47953042644992, 47953042653183,
+ERASE, 47953042653184, 47953042653184,
+STORE, 47953042653184, 47953042661375,
+STORE, 47953042661376, 47953044500479,
+STORE, 47953042800640, 47953044500479,
+STORE, 47953042661376, 47953042800639,
+ERASE, 47953042800640, 47953042800640,
+STORE, 47953042800640, 47953044459519,
+STORE, 47953044459520, 47953044500479,
+STORE, 47953044144128, 47953044459519,
+STORE, 47953042800640, 47953044144127,
+ERASE, 47953042800640, 47953042800640,
+STORE, 47953042800640, 47953044144127,
+STORE, 47953044455424, 47953044459519,
+STORE, 47953044144128, 47953044455423,
+ERASE, 47953044144128, 47953044144128,
+STORE, 47953044144128, 47953044455423,
+STORE, 47953044484096, 47953044500479,
+STORE, 47953044459520, 47953044484095,
+ERASE, 47953044459520, 47953044459520,
+STORE, 47953044459520, 47953044484095,
+ERASE, 47953044484096, 47953044484096,
+STORE, 47953044484096, 47953044500479,
+STORE, 47953044484096, 47953044512767,
+ERASE, 47953044459520, 47953044459520,
+STORE, 47953044459520, 47953044475903,
+STORE, 47953044475904, 47953044484095,
+ERASE, 47953042644992, 47953042644992,
+STORE, 47953042644992, 47953042649087,
+STORE, 47953042649088, 47953042653183,
+ERASE, 94150181994496, 94150181994496,
+STORE, 94150181994496, 94150182010879,
+STORE, 94150182010880, 94150182014975,
+ERASE, 139679752617984, 139679752617984,
+STORE, 139679752617984, 139679752622079,
+STORE, 139679752622080, 139679752626175,
+ERASE, 47953042538496, 47953042538496,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737044123648, 140737488351231,
+ERASE, 140737044123648, 140737044123648,
+STORE, 140737044123648, 140737044127743,
+STORE, 94425324294144, 94425325006847,
+ERASE, 94425324294144, 94425324294144,
+STORE, 94425324294144, 94425324343295,
+STORE, 94425324343296, 94425325006847,
+ERASE, 94425324343296, 94425324343296,
+STORE, 94425324343296, 94425324888063,
+STORE, 94425324888064, 94425324986367,
+STORE, 94425324986368, 94425325006847,
+STORE, 140382015016960, 140382015188991,
+ERASE, 140382015016960, 140382015016960,
+STORE, 140382015016960, 140382015021055,
+STORE, 140382015021056, 140382015188991,
+ERASE, 140382015021056, 140382015021056,
+STORE, 140382015021056, 140382015143935,
+STORE, 140382015143936, 140382015176703,
+STORE, 140382015176704, 140382015184895,
+STORE, 140382015184896, 140382015188991,
+STORE, 140737045585920, 140737045590015,
+STORE, 140737045573632, 140737045585919,
+STORE, 47250779979776, 47250779987967,
+STORE, 47250779987968, 47250779996159,
+STORE, 47250779996160, 47250780102655,
+STORE, 47250780012544, 47250780102655,
+STORE, 47250779996160, 47250780012543,
+ERASE, 47250780012544, 47250780012544,
+STORE, 47250780012544, 47250780086271,
+STORE, 47250780086272, 47250780102655,
+STORE, 47250780065792, 47250780086271,
+STORE, 47250780012544, 47250780065791,
+ERASE, 47250780012544, 47250780012544,
+STORE, 47250780012544, 47250780065791,
+STORE, 47250780082176, 47250780086271,
+STORE, 47250780065792, 47250780082175,
+ERASE, 47250780065792, 47250780065792,
+STORE, 47250780065792, 47250780082175,
+STORE, 47250780094464, 47250780102655,
+STORE, 47250780086272, 47250780094463,
+ERASE, 47250780086272, 47250780086272,
+STORE, 47250780086272, 47250780094463,
+ERASE, 47250780094464, 47250780094464,
+STORE, 47250780094464, 47250780102655,
+STORE, 47250780102656, 47250781941759,
+STORE, 47250780241920, 47250781941759,
+STORE, 47250780102656, 47250780241919,
+ERASE, 47250780241920, 47250780241920,
+STORE, 47250780241920, 47250781900799,
+STORE, 47250781900800, 47250781941759,
+STORE, 47250781585408, 47250781900799,
+STORE, 47250780241920, 47250781585407,
+ERASE, 47250780241920, 47250780241920,
+STORE, 47250780241920, 47250781585407,
+STORE, 47250781896704, 47250781900799,
+STORE, 47250781585408, 47250781896703,
+ERASE, 47250781585408, 47250781585408,
+STORE, 47250781585408, 47250781896703,
+STORE, 47250781925376, 47250781941759,
+STORE, 47250781900800, 47250781925375,
+ERASE, 47250781900800, 47250781900800,
+STORE, 47250781900800, 47250781925375,
+ERASE, 47250781925376, 47250781925376,
+STORE, 47250781925376, 47250781941759,
+STORE, 47250781925376, 47250781954047,
+ERASE, 47250781900800, 47250781900800,
+STORE, 47250781900800, 47250781917183,
+STORE, 47250781917184, 47250781925375,
+ERASE, 47250780086272, 47250780086272,
+STORE, 47250780086272, 47250780090367,
+STORE, 47250780090368, 47250780094463,
+ERASE, 94425324986368, 94425324986368,
+STORE, 94425324986368, 94425325002751,
+STORE, 94425325002752, 94425325006847,
+ERASE, 140382015176704, 140382015176704,
+STORE, 140382015176704, 140382015180799,
+STORE, 140382015180800, 140382015184895,
+ERASE, 47250779979776, 47250779979776,
+STORE, 94425351438336, 94425351573503,
+STORE, 140737488347136, 140737488351231,
+STORE, 140736801144832, 140737488351231,
+ERASE, 140736801144832, 140736801144832,
+STORE, 140736801144832, 140736801148927,
+STORE, 94629429358592, 94629430071295,
+ERASE, 94629429358592, 94629429358592,
+STORE, 94629429358592, 94629429407743,
+STORE, 94629429407744, 94629430071295,
+ERASE, 94629429407744, 94629429407744,
+STORE, 94629429407744, 94629429952511,
+STORE, 94629429952512, 94629430050815,
+STORE, 94629430050816, 94629430071295,
+STORE, 139801685483520, 139801685655551,
+ERASE, 139801685483520, 139801685483520,
+STORE, 139801685483520, 139801685487615,
+STORE, 139801685487616, 139801685655551,
+ERASE, 139801685487616, 139801685487616,
+STORE, 139801685487616, 139801685610495,
+STORE, 139801685610496, 139801685643263,
+STORE, 139801685643264, 139801685651455,
+STORE, 139801685651456, 139801685655551,
+STORE, 140736801198080, 140736801202175,
+STORE, 140736801185792, 140736801198079,
+STORE, 47831109513216, 47831109521407,
+STORE, 47831109521408, 47831109529599,
+STORE, 47831109529600, 47831109636095,
+STORE, 47831109545984, 47831109636095,
+STORE, 47831109529600, 47831109545983,
+ERASE, 47831109545984, 47831109545984,
+STORE, 47831109545984, 47831109619711,
+STORE, 47831109619712, 47831109636095,
+STORE, 47831109599232, 47831109619711,
+STORE, 47831109545984, 47831109599231,
+ERASE, 47831109545984, 47831109545984,
+STORE, 47831109545984, 47831109599231,
+STORE, 47831109615616, 47831109619711,
+STORE, 47831109599232, 47831109615615,
+ERASE, 47831109599232, 47831109599232,
+STORE, 47831109599232, 47831109615615,
+STORE, 47831109627904, 47831109636095,
+STORE, 47831109619712, 47831109627903,
+ERASE, 47831109619712, 47831109619712,
+STORE, 47831109619712, 47831109627903,
+ERASE, 47831109627904, 47831109627904,
+STORE, 47831109627904, 47831109636095,
+STORE, 47831109636096, 47831111475199,
+STORE, 47831109775360, 47831111475199,
+STORE, 47831109636096, 47831109775359,
+ERASE, 47831109775360, 47831109775360,
+STORE, 47831109775360, 47831111434239,
+STORE, 47831111434240, 47831111475199,
+STORE, 47831111118848, 47831111434239,
+STORE, 47831109775360, 47831111118847,
+ERASE, 47831109775360, 47831109775360,
+STORE, 47831109775360, 47831111118847,
+STORE, 47831111430144, 47831111434239,
+STORE, 47831111118848, 47831111430143,
+ERASE, 47831111118848, 47831111118848,
+STORE, 47831111118848, 47831111430143,
+STORE, 47831111458816, 47831111475199,
+STORE, 47831111434240, 47831111458815,
+ERASE, 47831111434240, 47831111434240,
+STORE, 47831111434240, 47831111458815,
+ERASE, 47831111458816, 47831111458816,
+STORE, 47831111458816, 47831111475199,
+STORE, 47831111458816, 47831111487487,
+ERASE, 47831111434240, 47831111434240,
+STORE, 47831111434240, 47831111450623,
+STORE, 47831111450624, 47831111458815,
+ERASE, 47831109619712, 47831109619712,
+STORE, 47831109619712, 47831109623807,
+STORE, 47831109623808, 47831109627903,
+ERASE, 94629430050816, 94629430050816,
+STORE, 94629430050816, 94629430067199,
+STORE, 94629430067200, 94629430071295,
+ERASE, 139801685643264, 139801685643264,
+STORE, 139801685643264, 139801685647359,
+STORE, 139801685647360, 139801685651455,
+ERASE, 47831109513216, 47831109513216,
+STORE, 140737488347136, 140737488351231,
+STORE, 140729419612160, 140737488351231,
+ERASE, 140729419612160, 140729419612160,
+STORE, 140729419612160, 140729419616255,
+STORE, 94443354148864, 94443354861567,
+ERASE, 94443354148864, 94443354148864,
+STORE, 94443354148864, 94443354198015,
+STORE, 94443354198016, 94443354861567,
+ERASE, 94443354198016, 94443354198016,
+STORE, 94443354198016, 94443354742783,
+STORE, 94443354742784, 94443354841087,
+STORE, 94443354841088, 94443354861567,
+STORE, 139741700038656, 139741700210687,
+ERASE, 139741700038656, 139741700038656,
+STORE, 139741700038656, 139741700042751,
+STORE, 139741700042752, 139741700210687,
+ERASE, 139741700042752, 139741700042752,
+STORE, 139741700042752, 139741700165631,
+STORE, 139741700165632, 139741700198399,
+STORE, 139741700198400, 139741700206591,
+STORE, 139741700206592, 139741700210687,
+STORE, 140729420574720, 140729420578815,
+STORE, 140729420562432, 140729420574719,
+STORE, 47891094958080, 47891094966271,
+STORE, 47891094966272, 47891094974463,
+STORE, 47891094974464, 47891095080959,
+STORE, 47891094990848, 47891095080959,
+STORE, 47891094974464, 47891094990847,
+ERASE, 47891094990848, 47891094990848,
+STORE, 47891094990848, 47891095064575,
+STORE, 47891095064576, 47891095080959,
+STORE, 47891095044096, 47891095064575,
+STORE, 47891094990848, 47891095044095,
+ERASE, 47891094990848, 47891094990848,
+STORE, 47891094990848, 47891095044095,
+STORE, 47891095060480, 47891095064575,
+STORE, 47891095044096, 47891095060479,
+ERASE, 47891095044096, 47891095044096,
+STORE, 47891095044096, 47891095060479,
+STORE, 47891095072768, 47891095080959,
+STORE, 47891095064576, 47891095072767,
+ERASE, 47891095064576, 47891095064576,
+STORE, 47891095064576, 47891095072767,
+ERASE, 47891095072768, 47891095072768,
+STORE, 47891095072768, 47891095080959,
+STORE, 47891095080960, 47891096920063,
+STORE, 47891095220224, 47891096920063,
+STORE, 47891095080960, 47891095220223,
+ERASE, 47891095220224, 47891095220224,
+STORE, 47891095220224, 47891096879103,
+STORE, 47891096879104, 47891096920063,
+STORE, 47891096563712, 47891096879103,
+STORE, 47891095220224, 47891096563711,
+ERASE, 47891095220224, 47891095220224,
+STORE, 47891095220224, 47891096563711,
+STORE, 47891096875008, 47891096879103,
+STORE, 47891096563712, 47891096875007,
+ERASE, 47891096563712, 47891096563712,
+STORE, 47891096563712, 47891096875007,
+STORE, 47891096903680, 47891096920063,
+STORE, 47891096879104, 47891096903679,
+ERASE, 47891096879104, 47891096879104,
+STORE, 47891096879104, 47891096903679,
+ERASE, 47891096903680, 47891096903680,
+STORE, 47891096903680, 47891096920063,
+STORE, 47891096903680, 47891096932351,
+ERASE, 47891096879104, 47891096879104,
+STORE, 47891096879104, 47891096895487,
+STORE, 47891096895488, 47891096903679,
+ERASE, 47891095064576, 47891095064576,
+STORE, 47891095064576, 47891095068671,
+STORE, 47891095068672, 47891095072767,
+ERASE, 94443354841088, 94443354841088,
+STORE, 94443354841088, 94443354857471,
+STORE, 94443354857472, 94443354861567,
+ERASE, 139741700198400, 139741700198400,
+STORE, 139741700198400, 139741700202495,
+STORE, 139741700202496, 139741700206591,
+ERASE, 47891094958080, 47891094958080,
+STORE, 94443360825344, 94443360960511,
+STORE, 140737488347136, 140737488351231,
+STORE, 140722961661952, 140737488351231,
+ERASE, 140722961661952, 140722961661952,
+STORE, 140722961661952, 140722961666047,
+STORE, 94878388944896, 94878389657599,
+ERASE, 94878388944896, 94878388944896,
+STORE, 94878388944896, 94878388994047,
+STORE, 94878388994048, 94878389657599,
+ERASE, 94878388994048, 94878388994048,
+STORE, 94878388994048, 94878389538815,
+STORE, 94878389538816, 94878389637119,
+STORE, 94878389637120, 94878389657599,
+STORE, 140210690056192, 140210690228223,
+ERASE, 140210690056192, 140210690056192,
+STORE, 140210690056192, 140210690060287,
+STORE, 140210690060288, 140210690228223,
+ERASE, 140210690060288, 140210690060288,
+STORE, 140210690060288, 140210690183167,
+STORE, 140210690183168, 140210690215935,
+STORE, 140210690215936, 140210690224127,
+STORE, 140210690224128, 140210690228223,
+STORE, 140722963148800, 140722963152895,
+STORE, 140722963136512, 140722963148799,
+STORE, 47422104940544, 47422104948735,
+STORE, 47422104948736, 47422104956927,
+STORE, 47422104956928, 47422105063423,
+STORE, 47422104973312, 47422105063423,
+STORE, 47422104956928, 47422104973311,
+ERASE, 47422104973312, 47422104973312,
+STORE, 47422104973312, 47422105047039,
+STORE, 47422105047040, 47422105063423,
+STORE, 47422105026560, 47422105047039,
+STORE, 47422104973312, 47422105026559,
+ERASE, 47422104973312, 47422104973312,
+STORE, 47422104973312, 47422105026559,
+STORE, 47422105042944, 47422105047039,
+STORE, 47422105026560, 47422105042943,
+ERASE, 47422105026560, 47422105026560,
+STORE, 47422105026560, 47422105042943,
+STORE, 47422105055232, 47422105063423,
+STORE, 47422105047040, 47422105055231,
+ERASE, 47422105047040, 47422105047040,
+STORE, 47422105047040, 47422105055231,
+ERASE, 47422105055232, 47422105055232,
+STORE, 47422105055232, 47422105063423,
+STORE, 47422105063424, 47422106902527,
+STORE, 47422105202688, 47422106902527,
+STORE, 47422105063424, 47422105202687,
+ERASE, 47422105202688, 47422105202688,
+STORE, 47422105202688, 47422106861567,
+STORE, 47422106861568, 47422106902527,
+STORE, 47422106546176, 47422106861567,
+STORE, 47422105202688, 47422106546175,
+ERASE, 47422105202688, 47422105202688,
+STORE, 47422105202688, 47422106546175,
+STORE, 47422106857472, 47422106861567,
+STORE, 47422106546176, 47422106857471,
+ERASE, 47422106546176, 47422106546176,
+STORE, 47422106546176, 47422106857471,
+STORE, 47422106886144, 47422106902527,
+STORE, 47422106861568, 47422106886143,
+ERASE, 47422106861568, 47422106861568,
+STORE, 47422106861568, 47422106886143,
+ERASE, 47422106886144, 47422106886144,
+STORE, 47422106886144, 47422106902527,
+STORE, 47422106886144, 47422106914815,
+ERASE, 47422106861568, 47422106861568,
+STORE, 47422106861568, 47422106877951,
+STORE, 47422106877952, 47422106886143,
+ERASE, 47422105047040, 47422105047040,
+STORE, 47422105047040, 47422105051135,
+STORE, 47422105051136, 47422105055231,
+ERASE, 94878389637120, 94878389637120,
+STORE, 94878389637120, 94878389653503,
+STORE, 94878389653504, 94878389657599,
+ERASE, 140210690215936, 140210690215936,
+STORE, 140210690215936, 140210690220031,
+STORE, 140210690220032, 140210690224127,
+ERASE, 47422104940544, 47422104940544,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727690309632, 140737488351231,
+ERASE, 140727690309632, 140727690309632,
+STORE, 140727690309632, 140727690313727,
+STORE, 94121892208640, 94121892921343,
+ERASE, 94121892208640, 94121892208640,
+STORE, 94121892208640, 94121892257791,
+STORE, 94121892257792, 94121892921343,
+ERASE, 94121892257792, 94121892257792,
+STORE, 94121892257792, 94121892802559,
+STORE, 94121892802560, 94121892900863,
+STORE, 94121892900864, 94121892921343,
+STORE, 140662438326272, 140662438498303,
+ERASE, 140662438326272, 140662438326272,
+STORE, 140662438326272, 140662438330367,
+STORE, 140662438330368, 140662438498303,
+ERASE, 140662438330368, 140662438330368,
+STORE, 140662438330368, 140662438453247,
+STORE, 140662438453248, 140662438486015,
+STORE, 140662438486016, 140662438494207,
+STORE, 140662438494208, 140662438498303,
+STORE, 140727690379264, 140727690383359,
+STORE, 140727690366976, 140727690379263,
+STORE, 46970356670464, 46970356678655,
+STORE, 46970356678656, 46970356686847,
+STORE, 46970356686848, 46970356793343,
+STORE, 46970356703232, 46970356793343,
+STORE, 46970356686848, 46970356703231,
+ERASE, 46970356703232, 46970356703232,
+STORE, 46970356703232, 46970356776959,
+STORE, 46970356776960, 46970356793343,
+STORE, 46970356756480, 46970356776959,
+STORE, 46970356703232, 46970356756479,
+ERASE, 46970356703232, 46970356703232,
+STORE, 46970356703232, 46970356756479,
+STORE, 46970356772864, 46970356776959,
+STORE, 46970356756480, 46970356772863,
+ERASE, 46970356756480, 46970356756480,
+STORE, 46970356756480, 46970356772863,
+STORE, 46970356785152, 46970356793343,
+STORE, 46970356776960, 46970356785151,
+ERASE, 46970356776960, 46970356776960,
+STORE, 46970356776960, 46970356785151,
+ERASE, 46970356785152, 46970356785152,
+STORE, 46970356785152, 46970356793343,
+STORE, 46970356793344, 46970358632447,
+STORE, 46970356932608, 46970358632447,
+STORE, 46970356793344, 46970356932607,
+ERASE, 46970356932608, 46970356932608,
+STORE, 46970356932608, 46970358591487,
+STORE, 46970358591488, 46970358632447,
+STORE, 46970358276096, 46970358591487,
+STORE, 46970356932608, 46970358276095,
+ERASE, 46970356932608, 46970356932608,
+STORE, 46970356932608, 46970358276095,
+STORE, 46970358587392, 46970358591487,
+STORE, 46970358276096, 46970358587391,
+ERASE, 46970358276096, 46970358276096,
+STORE, 46970358276096, 46970358587391,
+STORE, 46970358616064, 46970358632447,
+STORE, 46970358591488, 46970358616063,
+ERASE, 46970358591488, 46970358591488,
+STORE, 46970358591488, 46970358616063,
+ERASE, 46970358616064, 46970358616064,
+STORE, 46970358616064, 46970358632447,
+STORE, 46970358616064, 46970358644735,
+ERASE, 46970358591488, 46970358591488,
+STORE, 46970358591488, 46970358607871,
+STORE, 46970358607872, 46970358616063,
+ERASE, 46970356776960, 46970356776960,
+STORE, 46970356776960, 46970356781055,
+STORE, 46970356781056, 46970356785151,
+ERASE, 94121892900864, 94121892900864,
+STORE, 94121892900864, 94121892917247,
+STORE, 94121892917248, 94121892921343,
+ERASE, 140662438486016, 140662438486016,
+STORE, 140662438486016, 140662438490111,
+STORE, 140662438490112, 140662438494207,
+ERASE, 46970356670464, 46970356670464,
+STORE, 94121898610688, 94121898745855,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737189351424, 140737488351231,
+ERASE, 140737189351424, 140737189351424,
+STORE, 140737189351424, 140737189355519,
+STORE, 93847948832768, 93847949545471,
+ERASE, 93847948832768, 93847948832768,
+STORE, 93847948832768, 93847948881919,
+STORE, 93847948881920, 93847949545471,
+ERASE, 93847948881920, 93847948881920,
+STORE, 93847948881920, 93847949426687,
+STORE, 93847949426688, 93847949524991,
+STORE, 93847949524992, 93847949545471,
+STORE, 139698989985792, 139698990157823,
+ERASE, 139698989985792, 139698989985792,
+STORE, 139698989985792, 139698989989887,
+STORE, 139698989989888, 139698990157823,
+ERASE, 139698989989888, 139698989989888,
+STORE, 139698989989888, 139698990112767,
+STORE, 139698990112768, 139698990145535,
+STORE, 139698990145536, 139698990153727,
+STORE, 139698990153728, 139698990157823,
+STORE, 140737189744640, 140737189748735,
+STORE, 140737189732352, 140737189744639,
+STORE, 47933805010944, 47933805019135,
+STORE, 47933805019136, 47933805027327,
+STORE, 47933805027328, 47933805133823,
+STORE, 47933805043712, 47933805133823,
+STORE, 47933805027328, 47933805043711,
+ERASE, 47933805043712, 47933805043712,
+STORE, 47933805043712, 47933805117439,
+STORE, 47933805117440, 47933805133823,
+STORE, 47933805096960, 47933805117439,
+STORE, 47933805043712, 47933805096959,
+ERASE, 47933805043712, 47933805043712,
+STORE, 47933805043712, 47933805096959,
+STORE, 47933805113344, 47933805117439,
+STORE, 47933805096960, 47933805113343,
+ERASE, 47933805096960, 47933805096960,
+STORE, 47933805096960, 47933805113343,
+STORE, 47933805125632, 47933805133823,
+STORE, 47933805117440, 47933805125631,
+ERASE, 47933805117440, 47933805117440,
+STORE, 47933805117440, 47933805125631,
+ERASE, 47933805125632, 47933805125632,
+STORE, 47933805125632, 47933805133823,
+STORE, 47933805133824, 47933806972927,
+STORE, 47933805273088, 47933806972927,
+STORE, 47933805133824, 47933805273087,
+ERASE, 47933805273088, 47933805273088,
+STORE, 47933805273088, 47933806931967,
+STORE, 47933806931968, 47933806972927,
+STORE, 47933806616576, 47933806931967,
+STORE, 47933805273088, 47933806616575,
+ERASE, 47933805273088, 47933805273088,
+STORE, 47933805273088, 47933806616575,
+STORE, 47933806927872, 47933806931967,
+STORE, 47933806616576, 47933806927871,
+ERASE, 47933806616576, 47933806616576,
+STORE, 47933806616576, 47933806927871,
+STORE, 47933806956544, 47933806972927,
+STORE, 47933806931968, 47933806956543,
+ERASE, 47933806931968, 47933806931968,
+STORE, 47933806931968, 47933806956543,
+ERASE, 47933806956544, 47933806956544,
+STORE, 47933806956544, 47933806972927,
+STORE, 47933806956544, 47933806985215,
+ERASE, 47933806931968, 47933806931968,
+STORE, 47933806931968, 47933806948351,
+STORE, 47933806948352, 47933806956543,
+ERASE, 47933805117440, 47933805117440,
+STORE, 47933805117440, 47933805121535,
+STORE, 47933805121536, 47933805125631,
+ERASE, 93847949524992, 93847949524992,
+STORE, 93847949524992, 93847949541375,
+STORE, 93847949541376, 93847949545471,
+ERASE, 139698990145536, 139698990145536,
+STORE, 139698990145536, 139698990149631,
+STORE, 139698990149632, 139698990153727,
+ERASE, 47933805010944, 47933805010944,
+STORE, 140737488347136, 140737488351231,
+STORE, 140725553991680, 140737488351231,
+ERASE, 140725553991680, 140725553991680,
+STORE, 140725553991680, 140725553995775,
+STORE, 93980056248320, 93980056961023,
+ERASE, 93980056248320, 93980056248320,
+STORE, 93980056248320, 93980056297471,
+STORE, 93980056297472, 93980056961023,
+ERASE, 93980056297472, 93980056297472,
+STORE, 93980056297472, 93980056842239,
+STORE, 93980056842240, 93980056940543,
+STORE, 93980056940544, 93980056961023,
+STORE, 140146588971008, 140146589143039,
+ERASE, 140146588971008, 140146588971008,
+STORE, 140146588971008, 140146588975103,
+STORE, 140146588975104, 140146589143039,
+ERASE, 140146588975104, 140146588975104,
+STORE, 140146588975104, 140146589097983,
+STORE, 140146589097984, 140146589130751,
+STORE, 140146589130752, 140146589138943,
+STORE, 140146589138944, 140146589143039,
+STORE, 140725554860032, 140725554864127,
+STORE, 140725554847744, 140725554860031,
+STORE, 47486206025728, 47486206033919,
+STORE, 47486206033920, 47486206042111,
+STORE, 47486206042112, 47486206148607,
+STORE, 47486206058496, 47486206148607,
+STORE, 47486206042112, 47486206058495,
+ERASE, 47486206058496, 47486206058496,
+STORE, 47486206058496, 47486206132223,
+STORE, 47486206132224, 47486206148607,
+STORE, 47486206111744, 47486206132223,
+STORE, 47486206058496, 47486206111743,
+ERASE, 47486206058496, 47486206058496,
+STORE, 47486206058496, 47486206111743,
+STORE, 47486206128128, 47486206132223,
+STORE, 47486206111744, 47486206128127,
+ERASE, 47486206111744, 47486206111744,
+STORE, 47486206111744, 47486206128127,
+STORE, 47486206140416, 47486206148607,
+STORE, 47486206132224, 47486206140415,
+ERASE, 47486206132224, 47486206132224,
+STORE, 47486206132224, 47486206140415,
+ERASE, 47486206140416, 47486206140416,
+STORE, 47486206140416, 47486206148607,
+STORE, 47486206148608, 47486207987711,
+STORE, 47486206287872, 47486207987711,
+STORE, 47486206148608, 47486206287871,
+ERASE, 47486206287872, 47486206287872,
+STORE, 47486206287872, 47486207946751,
+STORE, 47486207946752, 47486207987711,
+STORE, 47486207631360, 47486207946751,
+STORE, 47486206287872, 47486207631359,
+ERASE, 47486206287872, 47486206287872,
+STORE, 47486206287872, 47486207631359,
+STORE, 47486207942656, 47486207946751,
+STORE, 47486207631360, 47486207942655,
+ERASE, 47486207631360, 47486207631360,
+STORE, 47486207631360, 47486207942655,
+STORE, 47486207971328, 47486207987711,
+STORE, 47486207946752, 47486207971327,
+ERASE, 47486207946752, 47486207946752,
+STORE, 47486207946752, 47486207971327,
+ERASE, 47486207971328, 47486207971328,
+STORE, 47486207971328, 47486207987711,
+STORE, 47486207971328, 47486207999999,
+ERASE, 47486207946752, 47486207946752,
+STORE, 47486207946752, 47486207963135,
+STORE, 47486207963136, 47486207971327,
+ERASE, 47486206132224, 47486206132224,
+STORE, 47486206132224, 47486206136319,
+STORE, 47486206136320, 47486206140415,
+ERASE, 93980056940544, 93980056940544,
+STORE, 93980056940544, 93980056956927,
+STORE, 93980056956928, 93980056961023,
+ERASE, 140146589130752, 140146589130752,
+STORE, 140146589130752, 140146589134847,
+STORE, 140146589134848, 140146589138943,
+ERASE, 47486206025728, 47486206025728,
+STORE, 93980070006784, 93980070141951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727334776832, 140737488351231,
+ERASE, 140727334776832, 140727334776832,
+STORE, 140727334776832, 140727334780927,
+STORE, 94049747247104, 94049747959807,
+ERASE, 94049747247104, 94049747247104,
+STORE, 94049747247104, 94049747296255,
+STORE, 94049747296256, 94049747959807,
+ERASE, 94049747296256, 94049747296256,
+STORE, 94049747296256, 94049747841023,
+STORE, 94049747841024, 94049747939327,
+STORE, 94049747939328, 94049747959807,
+STORE, 140227307216896, 140227307388927,
+ERASE, 140227307216896, 140227307216896,
+STORE, 140227307216896, 140227307220991,
+STORE, 140227307220992, 140227307388927,
+ERASE, 140227307220992, 140227307220992,
+STORE, 140227307220992, 140227307343871,
+STORE, 140227307343872, 140227307376639,
+STORE, 140227307376640, 140227307384831,
+STORE, 140227307384832, 140227307388927,
+STORE, 140727335337984, 140727335342079,
+STORE, 140727335325696, 140727335337983,
+STORE, 47405487779840, 47405487788031,
+STORE, 47405487788032, 47405487796223,
+STORE, 47405487796224, 47405487902719,
+STORE, 47405487812608, 47405487902719,
+STORE, 47405487796224, 47405487812607,
+ERASE, 47405487812608, 47405487812608,
+STORE, 47405487812608, 47405487886335,
+STORE, 47405487886336, 47405487902719,
+STORE, 47405487865856, 47405487886335,
+STORE, 47405487812608, 47405487865855,
+ERASE, 47405487812608, 47405487812608,
+STORE, 47405487812608, 47405487865855,
+STORE, 47405487882240, 47405487886335,
+STORE, 47405487865856, 47405487882239,
+ERASE, 47405487865856, 47405487865856,
+STORE, 47405487865856, 47405487882239,
+STORE, 47405487894528, 47405487902719,
+STORE, 47405487886336, 47405487894527,
+ERASE, 47405487886336, 47405487886336,
+STORE, 47405487886336, 47405487894527,
+ERASE, 47405487894528, 47405487894528,
+STORE, 47405487894528, 47405487902719,
+STORE, 47405487902720, 47405489741823,
+STORE, 47405488041984, 47405489741823,
+STORE, 47405487902720, 47405488041983,
+ERASE, 47405488041984, 47405488041984,
+STORE, 47405488041984, 47405489700863,
+STORE, 47405489700864, 47405489741823,
+STORE, 47405489385472, 47405489700863,
+STORE, 47405488041984, 47405489385471,
+ERASE, 47405488041984, 47405488041984,
+STORE, 47405488041984, 47405489385471,
+STORE, 47405489696768, 47405489700863,
+STORE, 47405489385472, 47405489696767,
+ERASE, 47405489385472, 47405489385472,
+STORE, 47405489385472, 47405489696767,
+STORE, 47405489725440, 47405489741823,
+STORE, 47405489700864, 47405489725439,
+ERASE, 47405489700864, 47405489700864,
+STORE, 47405489700864, 47405489725439,
+ERASE, 47405489725440, 47405489725440,
+STORE, 47405489725440, 47405489741823,
+STORE, 47405489725440, 47405489754111,
+ERASE, 47405489700864, 47405489700864,
+STORE, 47405489700864, 47405489717247,
+STORE, 47405489717248, 47405489725439,
+ERASE, 47405487886336, 47405487886336,
+STORE, 47405487886336, 47405487890431,
+STORE, 47405487890432, 47405487894527,
+ERASE, 94049747939328, 94049747939328,
+STORE, 94049747939328, 94049747955711,
+STORE, 94049747955712, 94049747959807,
+ERASE, 140227307376640, 140227307376640,
+STORE, 140227307376640, 140227307380735,
+STORE, 140227307380736, 140227307384831,
+ERASE, 47405487779840, 47405487779840,
+STORE, 94049758810112, 94049758945279,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727079718912, 140737488351231,
+ERASE, 140727079718912, 140727079718912,
+STORE, 140727079718912, 140727079723007,
+STORE, 94250996527104, 94250997239807,
+ERASE, 94250996527104, 94250996527104,
+STORE, 94250996527104, 94250996576255,
+STORE, 94250996576256, 94250997239807,
+ERASE, 94250996576256, 94250996576256,
+STORE, 94250996576256, 94250997121023,
+STORE, 94250997121024, 94250997219327,
+STORE, 94250997219328, 94250997239807,
+STORE, 140060022587392, 140060022759423,
+ERASE, 140060022587392, 140060022587392,
+STORE, 140060022587392, 140060022591487,
+STORE, 140060022591488, 140060022759423,
+ERASE, 140060022591488, 140060022591488,
+STORE, 140060022591488, 140060022714367,
+STORE, 140060022714368, 140060022747135,
+STORE, 140060022747136, 140060022755327,
+STORE, 140060022755328, 140060022759423,
+STORE, 140727079788544, 140727079792639,
+STORE, 140727079776256, 140727079788543,
+/* this next entry caused issues when the efficiency was lowered */
+STORE, 47572772409344, 47572772417535,
+STORE, 47572772417536, 47572772425727,
+STORE, 47572772425728, 47572772532223,
+STORE, 47572772442112, 47572772532223,
+STORE, 47572772425728, 47572772442111,
+ERASE, 47572772442112, 47572772442112,
+STORE, 47572772442112, 47572772515839,
+STORE, 47572772515840, 47572772532223,
+STORE, 47572772495360, 47572772515839,
+STORE, 47572772442112, 47572772495359,
+ERASE, 47572772442112, 47572772442112,
+STORE, 47572772442112, 47572772495359,
+STORE, 47572772511744, 47572772515839,
+STORE, 47572772495360, 47572772511743,
+ERASE, 47572772495360, 47572772495360,
+STORE, 47572772495360, 47572772511743,
+STORE, 47572772524032, 47572772532223,
+STORE, 47572772515840, 47572772524031,
+ERASE, 47572772515840, 47572772515840,
+STORE, 47572772515840, 47572772524031,
+ERASE, 47572772524032, 47572772524032,
+STORE, 47572772524032, 47572772532223,
+STORE, 47572772532224, 47572774371327,
+STORE, 47572772671488, 47572774371327,
+STORE, 47572772532224, 47572772671487,
+ERASE, 47572772671488, 47572772671488,
+STORE, 47572772671488, 47572774330367,
+STORE, 47572774330368, 47572774371327,
+STORE, 47572774014976, 47572774330367,
+STORE, 47572772671488, 47572774014975,
+ERASE, 47572772671488, 47572772671488,
+STORE, 47572772671488, 47572774014975,
+STORE, 47572774326272, 47572774330367,
+STORE, 47572774014976, 47572774326271,
+ERASE, 47572774014976, 47572774014976,
+STORE, 47572774014976, 47572774326271,
+STORE, 47572774354944, 47572774371327,
+STORE, 47572774330368, 47572774354943,
+ERASE, 47572774330368, 47572774330368,
+STORE, 47572774330368, 47572774354943,
+ERASE, 47572774354944, 47572774354944,
+STORE, 47572774354944, 47572774371327,
+STORE, 47572774354944, 47572774383615,
+ERASE, 47572774330368, 47572774330368,
+STORE, 47572774330368, 47572774346751,
+STORE, 47572774346752, 47572774354943,
+ERASE, 47572772515840, 47572772515840,
+STORE, 47572772515840, 47572772519935,
+STORE, 47572772519936, 47572772524031,
+ERASE, 94250997219328, 94250997219328,
+STORE, 94250997219328, 94250997235711,
+STORE, 94250997235712, 94250997239807,
+ERASE, 140060022747136, 140060022747136,
+STORE, 140060022747136, 140060022751231,
+STORE, 140060022751232, 140060022755327,
+ERASE, 47572772409344, 47572772409344,
+STORE, 94251018305536, 94251018440703,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730012389376, 140737488351231,
+ERASE, 140730012389376, 140730012389376,
+STORE, 140730012389376, 140730012393471,
+STORE, 94382607675392, 94382607695871,
+ERASE, 94382607675392, 94382607675392,
+STORE, 94382607675392, 94382607679487,
+STORE, 94382607679488, 94382607695871,
+ERASE, 94382607679488, 94382607679488,
+STORE, 94382607679488, 94382607683583,
+STORE, 94382607683584, 94382607687679,
+STORE, 94382607687680, 94382607695871,
+STORE, 140252451454976, 140252451627007,
+ERASE, 140252451454976, 140252451454976,
+STORE, 140252451454976, 140252451459071,
+STORE, 140252451459072, 140252451627007,
+ERASE, 140252451459072, 140252451459072,
+STORE, 140252451459072, 140252451581951,
+STORE, 140252451581952, 140252451614719,
+STORE, 140252451614720, 140252451622911,
+STORE, 140252451622912, 140252451627007,
+STORE, 140730013548544, 140730013552639,
+STORE, 140730013536256, 140730013548543,
+STORE, 47380343541760, 47380343549951,
+STORE, 47380343549952, 47380343558143,
+STORE, 47380343558144, 47380345397247,
+STORE, 47380343697408, 47380345397247,
+STORE, 47380343558144, 47380343697407,
+ERASE, 47380343697408, 47380343697408,
+STORE, 47380343697408, 47380345356287,
+STORE, 47380345356288, 47380345397247,
+STORE, 47380345040896, 47380345356287,
+STORE, 47380343697408, 47380345040895,
+ERASE, 47380343697408, 47380343697408,
+STORE, 47380343697408, 47380345040895,
+STORE, 47380345352192, 47380345356287,
+STORE, 47380345040896, 47380345352191,
+ERASE, 47380345040896, 47380345040896,
+STORE, 47380345040896, 47380345352191,
+STORE, 47380345380864, 47380345397247,
+STORE, 47380345356288, 47380345380863,
+ERASE, 47380345356288, 47380345356288,
+STORE, 47380345356288, 47380345380863,
+ERASE, 47380345380864, 47380345380864,
+STORE, 47380345380864, 47380345397247,
+ERASE, 47380345356288, 47380345356288,
+STORE, 47380345356288, 47380345372671,
+STORE, 47380345372672, 47380345380863,
+ERASE, 94382607687680, 94382607687680,
+STORE, 94382607687680, 94382607691775,
+STORE, 94382607691776, 94382607695871,
+ERASE, 140252451614720, 140252451614720,
+STORE, 140252451614720, 140252451618815,
+STORE, 140252451618816, 140252451622911,
+ERASE, 47380343541760, 47380343541760,
+STORE, 94382626803712, 94382626938879,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730900271104, 140737488351231,
+ERASE, 140730900271104, 140730900271104,
+STORE, 140730900271104, 140730900275199,
+STORE, 93855478120448, 93855478337535,
+ERASE, 93855478120448, 93855478120448,
+STORE, 93855478120448, 93855478198271,
+STORE, 93855478198272, 93855478337535,
+ERASE, 93855478198272, 93855478198272,
+STORE, 93855478198272, 93855478243327,
+STORE, 93855478243328, 93855478288383,
+STORE, 93855478288384, 93855478337535,
+STORE, 140092686573568, 140092686745599,
+ERASE, 140092686573568, 140092686573568,
+STORE, 140092686573568, 140092686577663,
+STORE, 140092686577664, 140092686745599,
+ERASE, 140092686577664, 140092686577664,
+STORE, 140092686577664, 140092686700543,
+STORE, 140092686700544, 140092686733311,
+STORE, 140092686733312, 140092686741503,
+STORE, 140092686741504, 140092686745599,
+STORE, 140730900537344, 140730900541439,
+STORE, 140730900525056, 140730900537343,
+STORE, 47540108423168, 47540108431359,
+STORE, 47540108431360, 47540108439551,
+STORE, 47540108439552, 47540110278655,
+STORE, 47540108578816, 47540110278655,
+STORE, 47540108439552, 47540108578815,
+ERASE, 47540108578816, 47540108578816,
+STORE, 47540108578816, 47540110237695,
+STORE, 47540110237696, 47540110278655,
+STORE, 47540109922304, 47540110237695,
+STORE, 47540108578816, 47540109922303,
+ERASE, 47540108578816, 47540108578816,
+STORE, 47540108578816, 47540109922303,
+STORE, 47540110233600, 47540110237695,
+STORE, 47540109922304, 47540110233599,
+ERASE, 47540109922304, 47540109922304,
+STORE, 47540109922304, 47540110233599,
+STORE, 47540110262272, 47540110278655,
+STORE, 47540110237696, 47540110262271,
+ERASE, 47540110237696, 47540110237696,
+STORE, 47540110237696, 47540110262271,
+ERASE, 47540110262272, 47540110262272,
+STORE, 47540110262272, 47540110278655,
+ERASE, 47540110237696, 47540110237696,
+STORE, 47540110237696, 47540110254079,
+STORE, 47540110254080, 47540110262271,
+ERASE, 93855478288384, 93855478288384,
+STORE, 93855478288384, 93855478333439,
+STORE, 93855478333440, 93855478337535,
+ERASE, 140092686733312, 140092686733312,
+STORE, 140092686733312, 140092686737407,
+STORE, 140092686737408, 140092686741503,
+ERASE, 47540108423168, 47540108423168,
+STORE, 93855492222976, 93855492358143,
+STORE, 93855492222976, 93855492493311,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733498146816, 140737488351231,
+ERASE, 140733498146816, 140733498146816,
+STORE, 140733498146816, 140733498150911,
+STORE, 94170739654656, 94170740367359,
+ERASE, 94170739654656, 94170739654656,
+STORE, 94170739654656, 94170739703807,
+STORE, 94170739703808, 94170740367359,
+ERASE, 94170739703808, 94170739703808,
+STORE, 94170739703808, 94170740248575,
+STORE, 94170740248576, 94170740346879,
+STORE, 94170740346880, 94170740367359,
+STORE, 140024788877312, 140024789049343,
+ERASE, 140024788877312, 140024788877312,
+STORE, 140024788877312, 140024788881407,
+STORE, 140024788881408, 140024789049343,
+ERASE, 140024788881408, 140024788881408,
+STORE, 140024788881408, 140024789004287,
+STORE, 140024789004288, 140024789037055,
+STORE, 140024789037056, 140024789045247,
+STORE, 140024789045248, 140024789049343,
+STORE, 140733499023360, 140733499027455,
+STORE, 140733499011072, 140733499023359,
+STORE, 47608006119424, 47608006127615,
+STORE, 47608006127616, 47608006135807,
+STORE, 47608006135808, 47608006242303,
+STORE, 47608006152192, 47608006242303,
+STORE, 47608006135808, 47608006152191,
+ERASE, 47608006152192, 47608006152192,
+STORE, 47608006152192, 47608006225919,
+STORE, 47608006225920, 47608006242303,
+STORE, 47608006205440, 47608006225919,
+STORE, 47608006152192, 47608006205439,
+ERASE, 47608006152192, 47608006152192,
+STORE, 47608006152192, 47608006205439,
+STORE, 47608006221824, 47608006225919,
+STORE, 47608006205440, 47608006221823,
+ERASE, 47608006205440, 47608006205440,
+STORE, 47608006205440, 47608006221823,
+STORE, 47608006234112, 47608006242303,
+STORE, 47608006225920, 47608006234111,
+ERASE, 47608006225920, 47608006225920,
+STORE, 47608006225920, 47608006234111,
+ERASE, 47608006234112, 47608006234112,
+STORE, 47608006234112, 47608006242303,
+STORE, 47608006242304, 47608008081407,
+STORE, 47608006381568, 47608008081407,
+STORE, 47608006242304, 47608006381567,
+ERASE, 47608006381568, 47608006381568,
+STORE, 47608006381568, 47608008040447,
+STORE, 47608008040448, 47608008081407,
+STORE, 47608007725056, 47608008040447,
+STORE, 47608006381568, 47608007725055,
+ERASE, 47608006381568, 47608006381568,
+STORE, 47608006381568, 47608007725055,
+STORE, 47608008036352, 47608008040447,
+STORE, 47608007725056, 47608008036351,
+ERASE, 47608007725056, 47608007725056,
+STORE, 47608007725056, 47608008036351,
+STORE, 47608008065024, 47608008081407,
+STORE, 47608008040448, 47608008065023,
+ERASE, 47608008040448, 47608008040448,
+STORE, 47608008040448, 47608008065023,
+ERASE, 47608008065024, 47608008065024,
+STORE, 47608008065024, 47608008081407,
+STORE, 47608008065024, 47608008093695,
+ERASE, 47608008040448, 47608008040448,
+STORE, 47608008040448, 47608008056831,
+STORE, 47608008056832, 47608008065023,
+ERASE, 47608006225920, 47608006225920,
+STORE, 47608006225920, 47608006230015,
+STORE, 47608006230016, 47608006234111,
+ERASE, 94170740346880, 94170740346880,
+STORE, 94170740346880, 94170740363263,
+STORE, 94170740363264, 94170740367359,
+ERASE, 140024789037056, 140024789037056,
+STORE, 140024789037056, 140024789041151,
+STORE, 140024789041152, 140024789045247,
+ERASE, 47608006119424, 47608006119424,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730264326144, 140737488351231,
+ERASE, 140730264326144, 140730264326144,
+STORE, 140730264326144, 140730264330239,
+STORE, 94653216407552, 94653217120255,
+ERASE, 94653216407552, 94653216407552,
+STORE, 94653216407552, 94653216456703,
+STORE, 94653216456704, 94653217120255,
+ERASE, 94653216456704, 94653216456704,
+STORE, 94653216456704, 94653217001471,
+STORE, 94653217001472, 94653217099775,
+STORE, 94653217099776, 94653217120255,
+STORE, 140103617011712, 140103617183743,
+ERASE, 140103617011712, 140103617011712,
+STORE, 140103617011712, 140103617015807,
+STORE, 140103617015808, 140103617183743,
+ERASE, 140103617015808, 140103617015808,
+STORE, 140103617015808, 140103617138687,
+STORE, 140103617138688, 140103617171455,
+STORE, 140103617171456, 140103617179647,
+STORE, 140103617179648, 140103617183743,
+STORE, 140730265427968, 140730265432063,
+STORE, 140730265415680, 140730265427967,
+STORE, 47529177985024, 47529177993215,
+STORE, 47529177993216, 47529178001407,
+STORE, 47529178001408, 47529178107903,
+STORE, 47529178017792, 47529178107903,
+STORE, 47529178001408, 47529178017791,
+ERASE, 47529178017792, 47529178017792,
+STORE, 47529178017792, 47529178091519,
+STORE, 47529178091520, 47529178107903,
+STORE, 47529178071040, 47529178091519,
+STORE, 47529178017792, 47529178071039,
+ERASE, 47529178017792, 47529178017792,
+STORE, 47529178017792, 47529178071039,
+STORE, 47529178087424, 47529178091519,
+STORE, 47529178071040, 47529178087423,
+ERASE, 47529178071040, 47529178071040,
+STORE, 47529178071040, 47529178087423,
+STORE, 47529178099712, 47529178107903,
+STORE, 47529178091520, 47529178099711,
+ERASE, 47529178091520, 47529178091520,
+STORE, 47529178091520, 47529178099711,
+ERASE, 47529178099712, 47529178099712,
+STORE, 47529178099712, 47529178107903,
+STORE, 47529178107904, 47529179947007,
+STORE, 47529178247168, 47529179947007,
+STORE, 47529178107904, 47529178247167,
+ERASE, 47529178247168, 47529178247168,
+STORE, 47529178247168, 47529179906047,
+STORE, 47529179906048, 47529179947007,
+STORE, 47529179590656, 47529179906047,
+STORE, 47529178247168, 47529179590655,
+ERASE, 47529178247168, 47529178247168,
+STORE, 47529178247168, 47529179590655,
+STORE, 47529179901952, 47529179906047,
+STORE, 47529179590656, 47529179901951,
+ERASE, 47529179590656, 47529179590656,
+STORE, 47529179590656, 47529179901951,
+STORE, 47529179930624, 47529179947007,
+STORE, 47529179906048, 47529179930623,
+ERASE, 47529179906048, 47529179906048,
+STORE, 47529179906048, 47529179930623,
+ERASE, 47529179930624, 47529179930624,
+STORE, 47529179930624, 47529179947007,
+STORE, 47529179930624, 47529179959295,
+ERASE, 47529179906048, 47529179906048,
+STORE, 47529179906048, 47529179922431,
+STORE, 47529179922432, 47529179930623,
+ERASE, 47529178091520, 47529178091520,
+STORE, 47529178091520, 47529178095615,
+STORE, 47529178095616, 47529178099711,
+ERASE, 94653217099776, 94653217099776,
+STORE, 94653217099776, 94653217116159,
+STORE, 94653217116160, 94653217120255,
+ERASE, 140103617171456, 140103617171456,
+STORE, 140103617171456, 140103617175551,
+STORE, 140103617175552, 140103617179647,
+ERASE, 47529177985024, 47529177985024,
+STORE, 94653241135104, 94653241270271,
+STORE, 140737488347136, 140737488351231,
+STORE, 140736284549120, 140737488351231,
+ERASE, 140736284549120, 140736284549120,
+STORE, 140736284549120, 140736284553215,
+STORE, 93963663822848, 93963664506879,
+ERASE, 93963663822848, 93963663822848,
+STORE, 93963663822848, 93963663884287,
+STORE, 93963663884288, 93963664506879,
+ERASE, 93963663884288, 93963663884288,
+STORE, 93963663884288, 93963664240639,
+STORE, 93963664240640, 93963664379903,
+STORE, 93963664379904, 93963664506879,
+STORE, 140450188439552, 140450188611583,
+ERASE, 140450188439552, 140450188439552,
+STORE, 140450188439552, 140450188443647,
+STORE, 140450188443648, 140450188611583,
+ERASE, 140450188443648, 140450188443648,
+STORE, 140450188443648, 140450188566527,
+STORE, 140450188566528, 140450188599295,
+STORE, 140450188599296, 140450188607487,
+STORE, 140450188607488, 140450188611583,
+STORE, 140736284577792, 140736284581887,
+STORE, 140736284565504, 140736284577791,
+STORE, 47182606557184, 47182606565375,
+STORE, 47182606565376, 47182606573567,
+STORE, 47182606573568, 47182608412671,
+STORE, 47182606712832, 47182608412671,
+STORE, 47182606573568, 47182606712831,
+ERASE, 47182606712832, 47182606712832,
+STORE, 47182606712832, 47182608371711,
+STORE, 47182608371712, 47182608412671,
+STORE, 47182608056320, 47182608371711,
+STORE, 47182606712832, 47182608056319,
+ERASE, 47182606712832, 47182606712832,
+STORE, 47182606712832, 47182608056319,
+STORE, 47182608367616, 47182608371711,
+STORE, 47182608056320, 47182608367615,
+ERASE, 47182608056320, 47182608056320,
+STORE, 47182608056320, 47182608367615,
+STORE, 47182608396288, 47182608412671,
+STORE, 47182608371712, 47182608396287,
+ERASE, 47182608371712, 47182608371712,
+STORE, 47182608371712, 47182608396287,
+ERASE, 47182608396288, 47182608396288,
+STORE, 47182608396288, 47182608412671,
+STORE, 47182608412672, 47182608523263,
+STORE, 47182608429056, 47182608523263,
+STORE, 47182608412672, 47182608429055,
+ERASE, 47182608429056, 47182608429056,
+STORE, 47182608429056, 47182608515071,
+STORE, 47182608515072, 47182608523263,
+STORE, 47182608490496, 47182608515071,
+STORE, 47182608429056, 47182608490495,
+ERASE, 47182608429056, 47182608429056,
+STORE, 47182608429056, 47182608490495,
+STORE, 47182608510976, 47182608515071,
+STORE, 47182608490496, 47182608510975,
+ERASE, 47182608490496, 47182608490496,
+STORE, 47182608490496, 47182608510975,
+ERASE, 47182608515072, 47182608515072,
+STORE, 47182608515072, 47182608523263,
+STORE, 47182608523264, 47182608568319,
+ERASE, 47182608523264, 47182608523264,
+STORE, 47182608523264, 47182608531455,
+STORE, 47182608531456, 47182608568319,
+STORE, 47182608551936, 47182608568319,
+STORE, 47182608531456, 47182608551935,
+ERASE, 47182608531456, 47182608531456,
+STORE, 47182608531456, 47182608551935,
+STORE, 47182608560128, 47182608568319,
+STORE, 47182608551936, 47182608560127,
+ERASE, 47182608551936, 47182608551936,
+STORE, 47182608551936, 47182608568319,
+ERASE, 47182608551936, 47182608551936,
+STORE, 47182608551936, 47182608560127,
+STORE, 47182608560128, 47182608568319,
+ERASE, 47182608560128, 47182608560128,
+STORE, 47182608560128, 47182608568319,
+STORE, 47182608568320, 47182608916479,
+STORE, 47182608609280, 47182608916479,
+STORE, 47182608568320, 47182608609279,
+ERASE, 47182608609280, 47182608609280,
+STORE, 47182608609280, 47182608891903,
+STORE, 47182608891904, 47182608916479,
+STORE, 47182608822272, 47182608891903,
+STORE, 47182608609280, 47182608822271,
+ERASE, 47182608609280, 47182608609280,
+STORE, 47182608609280, 47182608822271,
+STORE, 47182608887808, 47182608891903,
+STORE, 47182608822272, 47182608887807,
+ERASE, 47182608822272, 47182608822272,
+STORE, 47182608822272, 47182608887807,
+ERASE, 47182608891904, 47182608891904,
+STORE, 47182608891904, 47182608916479,
+STORE, 47182608916480, 47182611177471,
+STORE, 47182609068032, 47182611177471,
+STORE, 47182608916480, 47182609068031,
+ERASE, 47182609068032, 47182609068032,
+STORE, 47182609068032, 47182611161087,
+STORE, 47182611161088, 47182611177471,
+STORE, 47182611169280, 47182611177471,
+STORE, 47182611161088, 47182611169279,
+ERASE, 47182611161088, 47182611161088,
+STORE, 47182611161088, 47182611169279,
+ERASE, 47182611169280, 47182611169280,
+STORE, 47182611169280, 47182611177471,
+STORE, 47182611177472, 47182611312639,
+ERASE, 47182611177472, 47182611177472,
+STORE, 47182611177472, 47182611202047,
+STORE, 47182611202048, 47182611312639,
+STORE, 47182611263488, 47182611312639,
+STORE, 47182611202048, 47182611263487,
+ERASE, 47182611202048, 47182611202048,
+STORE, 47182611202048, 47182611263487,
+STORE, 47182611288064, 47182611312639,
+STORE, 47182611263488, 47182611288063,
+ERASE, 47182611263488, 47182611263488,
+STORE, 47182611263488, 47182611312639,
+ERASE, 47182611263488, 47182611263488,
+STORE, 47182611263488, 47182611288063,
+STORE, 47182611288064, 47182611312639,
+STORE, 47182611296256, 47182611312639,
+STORE, 47182611288064, 47182611296255,
+ERASE, 47182611288064, 47182611288064,
+STORE, 47182611288064, 47182611296255,
+ERASE, 47182611296256, 47182611296256,
+STORE, 47182611296256, 47182611312639,
+STORE, 47182611296256, 47182611320831,
+STORE, 47182611320832, 47182611484671,
+ERASE, 47182611320832, 47182611320832,
+STORE, 47182611320832, 47182611333119,
+STORE, 47182611333120, 47182611484671,
+STORE, 47182611431424, 47182611484671,
+STORE, 47182611333120, 47182611431423,
+ERASE, 47182611333120, 47182611333120,
+STORE, 47182611333120, 47182611431423,
+STORE, 47182611476480, 47182611484671,
+STORE, 47182611431424, 47182611476479,
+ERASE, 47182611431424, 47182611431424,
+STORE, 47182611431424, 47182611484671,
+ERASE, 47182611431424, 47182611431424,
+STORE, 47182611431424, 47182611476479,
+STORE, 47182611476480, 47182611484671,
+ERASE, 47182611476480, 47182611476480,
+STORE, 47182611476480, 47182611484671,
+STORE, 47182611484672, 47182612082687,
+STORE, 47182611603456, 47182612082687,
+STORE, 47182611484672, 47182611603455,
+ERASE, 47182611603456, 47182611603456,
+STORE, 47182611603456, 47182612029439,
+STORE, 47182612029440, 47182612082687,
+STORE, 47182611918848, 47182612029439,
+STORE, 47182611603456, 47182611918847,
+ERASE, 47182611603456, 47182611603456,
+STORE, 47182611603456, 47182611918847,
+STORE, 47182612025344, 47182612029439,
+STORE, 47182611918848, 47182612025343,
+ERASE, 47182611918848, 47182611918848,
+STORE, 47182611918848, 47182612025343,
+ERASE, 47182612029440, 47182612029440,
+STORE, 47182612029440, 47182612082687,
+STORE, 47182612082688, 47182615134207,
+STORE, 47182612627456, 47182615134207,
+STORE, 47182612082688, 47182612627455,
+ERASE, 47182612627456, 47182612627456,
+STORE, 47182612627456, 47182614913023,
+STORE, 47182614913024, 47182615134207,
+STORE, 47182614323200, 47182614913023,
+STORE, 47182612627456, 47182614323199,
+ERASE, 47182612627456, 47182612627456,
+STORE, 47182612627456, 47182614323199,
+STORE, 47182614908928, 47182614913023,
+STORE, 47182614323200, 47182614908927,
+ERASE, 47182614323200, 47182614323200,
+STORE, 47182614323200, 47182614908927,
+STORE, 47182615117824, 47182615134207,
+STORE, 47182614913024, 47182615117823,
+ERASE, 47182614913024, 47182614913024,
+STORE, 47182614913024, 47182615117823,
+ERASE, 47182615117824, 47182615117824,
+STORE, 47182615117824, 47182615134207,
+STORE, 47182615134208, 47182615166975,
+ERASE, 47182615134208, 47182615134208,
+STORE, 47182615134208, 47182615142399,
+STORE, 47182615142400, 47182615166975,
+STORE, 47182615154688, 47182615166975,
+STORE, 47182615142400, 47182615154687,
+ERASE, 47182615142400, 47182615142400,
+STORE, 47182615142400, 47182615154687,
+STORE, 47182615158784, 47182615166975,
+STORE, 47182615154688, 47182615158783,
+ERASE, 47182615154688, 47182615154688,
+STORE, 47182615154688, 47182615166975,
+ERASE, 47182615154688, 47182615154688,
+STORE, 47182615154688, 47182615158783,
+STORE, 47182615158784, 47182615166975,
+ERASE, 47182615158784, 47182615158784,
+STORE, 47182615158784, 47182615166975,
+STORE, 47182615166976, 47182615203839,
+ERASE, 47182615166976, 47182615166976,
+STORE, 47182615166976, 47182615175167,
+STORE, 47182615175168, 47182615203839,
+STORE, 47182615191552, 47182615203839,
+STORE, 47182615175168, 47182615191551,
+ERASE, 47182615175168, 47182615175168,
+STORE, 47182615175168, 47182615191551,
+STORE, 47182615195648, 47182615203839,
+STORE, 47182615191552, 47182615195647,
+ERASE, 47182615191552, 47182615191552,
+STORE, 47182615191552, 47182615203839,
+ERASE, 47182615191552, 47182615191552,
+STORE, 47182615191552, 47182615195647,
+STORE, 47182615195648, 47182615203839,
+ERASE, 47182615195648, 47182615195648,
+STORE, 47182615195648, 47182615203839,
+STORE, 47182615203840, 47182615678975,
+ERASE, 47182615203840, 47182615203840,
+STORE, 47182615203840, 47182615212031,
+STORE, 47182615212032, 47182615678975,
+STORE, 47182615547904, 47182615678975,
+STORE, 47182615212032, 47182615547903,
+ERASE, 47182615212032, 47182615212032,
+STORE, 47182615212032, 47182615547903,
+STORE, 47182615670784, 47182615678975,
+STORE, 47182615547904, 47182615670783,
+ERASE, 47182615547904, 47182615547904,
+STORE, 47182615547904, 47182615678975,
+ERASE, 47182615547904, 47182615547904,
+STORE, 47182615547904, 47182615670783,
+STORE, 47182615670784, 47182615678975,
+ERASE, 47182615670784, 47182615670784,
+STORE, 47182615670784, 47182615678975,
+STORE, 47182615678976, 47182615687167,
+STORE, 47182615687168, 47182615707647,
+ERASE, 47182615687168, 47182615687168,
+STORE, 47182615687168, 47182615691263,
+STORE, 47182615691264, 47182615707647,
+STORE, 47182615695360, 47182615707647,
+STORE, 47182615691264, 47182615695359,
+ERASE, 47182615691264, 47182615691264,
+STORE, 47182615691264, 47182615695359,
+STORE, 47182615699456, 47182615707647,
+STORE, 47182615695360, 47182615699455,
+ERASE, 47182615695360, 47182615695360,
+STORE, 47182615695360, 47182615707647,
+ERASE, 47182615695360, 47182615695360,
+STORE, 47182615695360, 47182615699455,
+STORE, 47182615699456, 47182615707647,
+ERASE, 47182615699456, 47182615699456,
+STORE, 47182615699456, 47182615707647,
+STORE, 47182615707648, 47182615715839,
+ERASE, 47182608371712, 47182608371712,
+STORE, 47182608371712, 47182608388095,
+STORE, 47182608388096, 47182608396287,
+ERASE, 47182615699456, 47182615699456,
+STORE, 47182615699456, 47182615703551,
+STORE, 47182615703552, 47182615707647,
+ERASE, 47182611288064, 47182611288064,
+STORE, 47182611288064, 47182611292159,
+STORE, 47182611292160, 47182611296255,
+ERASE, 47182615670784, 47182615670784,
+STORE, 47182615670784, 47182615674879,
+STORE, 47182615674880, 47182615678975,
+ERASE, 47182615195648, 47182615195648,
+STORE, 47182615195648, 47182615199743,
+STORE, 47182615199744, 47182615203839,
+ERASE, 47182615158784, 47182615158784,
+STORE, 47182615158784, 47182615162879,
+STORE, 47182615162880, 47182615166975,
+ERASE, 47182614913024, 47182614913024,
+STORE, 47182614913024, 47182615109631,
+STORE, 47182615109632, 47182615117823,
+ERASE, 47182612029440, 47182612029440,
+STORE, 47182612029440, 47182612066303,
+STORE, 47182612066304, 47182612082687,
+ERASE, 47182611476480, 47182611476480,
+STORE, 47182611476480, 47182611480575,
+STORE, 47182611480576, 47182611484671,
+ERASE, 47182611161088, 47182611161088,
+STORE, 47182611161088, 47182611165183,
+STORE, 47182611165184, 47182611169279,
+ERASE, 47182608891904, 47182608891904,
+STORE, 47182608891904, 47182608912383,
+STORE, 47182608912384, 47182608916479,
+ERASE, 47182608560128, 47182608560128,
+STORE, 47182608560128, 47182608564223,
+STORE, 47182608564224, 47182608568319,
+ERASE, 47182608515072, 47182608515072,
+STORE, 47182608515072, 47182608519167,
+STORE, 47182608519168, 47182608523263,
+ERASE, 93963664379904, 93963664379904,
+STORE, 93963664379904, 93963664502783,
+STORE, 93963664502784, 93963664506879,
+ERASE, 140450188599296, 140450188599296,
+STORE, 140450188599296, 140450188603391,
+STORE, 140450188603392, 140450188607487,
+ERASE, 47182606557184, 47182606557184,
+STORE, 93963694723072, 93963694858239,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730313261056, 140737488351231,
+ERASE, 140730313261056, 140730313261056,
+STORE, 140730313261056, 140730313265151,
+STORE, 94386579017728, 94386579697663,
+ERASE, 94386579017728, 94386579017728,
+STORE, 94386579017728, 94386579083263,
+STORE, 94386579083264, 94386579697663,
+ERASE, 94386579083264, 94386579083264,
+STORE, 94386579083264, 94386579431423,
+STORE, 94386579431424, 94386579570687,
+STORE, 94386579570688, 94386579697663,
+STORE, 140124810838016, 140124811010047,
+ERASE, 140124810838016, 140124810838016,
+STORE, 140124810838016, 140124810842111,
+STORE, 140124810842112, 140124811010047,
+ERASE, 140124810842112, 140124810842112,
+STORE, 140124810842112, 140124810964991,
+STORE, 140124810964992, 140124810997759,
+STORE, 140124810997760, 140124811005951,
+STORE, 140124811005952, 140124811010047,
+STORE, 140730313601024, 140730313605119,
+STORE, 140730313588736, 140730313601023,
+STORE, 47507984158720, 47507984166911,
+STORE, 47507984166912, 47507984175103,
+STORE, 47507984175104, 47507986014207,
+STORE, 47507984314368, 47507986014207,
+STORE, 47507984175104, 47507984314367,
+ERASE, 47507984314368, 47507984314368,
+STORE, 47507984314368, 47507985973247,
+STORE, 47507985973248, 47507986014207,
+STORE, 47507985657856, 47507985973247,
+STORE, 47507984314368, 47507985657855,
+ERASE, 47507984314368, 47507984314368,
+STORE, 47507984314368, 47507985657855,
+STORE, 47507985969152, 47507985973247,
+STORE, 47507985657856, 47507985969151,
+ERASE, 47507985657856, 47507985657856,
+STORE, 47507985657856, 47507985969151,
+STORE, 47507985997824, 47507986014207,
+STORE, 47507985973248, 47507985997823,
+ERASE, 47507985973248, 47507985973248,
+STORE, 47507985973248, 47507985997823,
+ERASE, 47507985997824, 47507985997824,
+STORE, 47507985997824, 47507986014207,
+STORE, 47507986014208, 47507986124799,
+STORE, 47507986030592, 47507986124799,
+STORE, 47507986014208, 47507986030591,
+ERASE, 47507986030592, 47507986030592,
+STORE, 47507986030592, 47507986116607,
+STORE, 47507986116608, 47507986124799,
+STORE, 47507986092032, 47507986116607,
+STORE, 47507986030592, 47507986092031,
+ERASE, 47507986030592, 47507986030592,
+STORE, 47507986030592, 47507986092031,
+STORE, 47507986112512, 47507986116607,
+STORE, 47507986092032, 47507986112511,
+ERASE, 47507986092032, 47507986092032,
+STORE, 47507986092032, 47507986112511,
+ERASE, 47507986116608, 47507986116608,
+STORE, 47507986116608, 47507986124799,
+STORE, 47507986124800, 47507986169855,
+ERASE, 47507986124800, 47507986124800,
+STORE, 47507986124800, 47507986132991,
+STORE, 47507986132992, 47507986169855,
+STORE, 47507986153472, 47507986169855,
+STORE, 47507986132992, 47507986153471,
+ERASE, 47507986132992, 47507986132992,
+STORE, 47507986132992, 47507986153471,
+STORE, 47507986161664, 47507986169855,
+STORE, 47507986153472, 47507986161663,
+ERASE, 47507986153472, 47507986153472,
+STORE, 47507986153472, 47507986169855,
+ERASE, 47507986153472, 47507986153472,
+STORE, 47507986153472, 47507986161663,
+STORE, 47507986161664, 47507986169855,
+ERASE, 47507986161664, 47507986161664,
+STORE, 47507986161664, 47507986169855,
+STORE, 47507986169856, 47507986518015,
+STORE, 47507986210816, 47507986518015,
+STORE, 47507986169856, 47507986210815,
+ERASE, 47507986210816, 47507986210816,
+STORE, 47507986210816, 47507986493439,
+STORE, 47507986493440, 47507986518015,
+STORE, 47507986423808, 47507986493439,
+STORE, 47507986210816, 47507986423807,
+ERASE, 47507986210816, 47507986210816,
+STORE, 47507986210816, 47507986423807,
+STORE, 47507986489344, 47507986493439,
+STORE, 47507986423808, 47507986489343,
+ERASE, 47507986423808, 47507986423808,
+STORE, 47507986423808, 47507986489343,
+ERASE, 47507986493440, 47507986493440,
+STORE, 47507986493440, 47507986518015,
+STORE, 47507986518016, 47507988779007,
+STORE, 47507986669568, 47507988779007,
+STORE, 47507986518016, 47507986669567,
+ERASE, 47507986669568, 47507986669568,
+STORE, 47507986669568, 47507988762623,
+STORE, 47507988762624, 47507988779007,
+STORE, 47507988770816, 47507988779007,
+STORE, 47507988762624, 47507988770815,
+ERASE, 47507988762624, 47507988762624,
+STORE, 47507988762624, 47507988770815,
+ERASE, 47507988770816, 47507988770816,
+STORE, 47507988770816, 47507988779007,
+STORE, 47507988779008, 47507988914175,
+ERASE, 47507988779008, 47507988779008,
+STORE, 47507988779008, 47507988803583,
+STORE, 47507988803584, 47507988914175,
+STORE, 47507988865024, 47507988914175,
+STORE, 47507988803584, 47507988865023,
+ERASE, 47507988803584, 47507988803584,
+STORE, 47507988803584, 47507988865023,
+STORE, 47507988889600, 47507988914175,
+STORE, 47507988865024, 47507988889599,
+ERASE, 47507988865024, 47507988865024,
+STORE, 47507988865024, 47507988914175,
+ERASE, 47507988865024, 47507988865024,
+STORE, 47507988865024, 47507988889599,
+STORE, 47507988889600, 47507988914175,
+STORE, 47507988897792, 47507988914175,
+STORE, 47507988889600, 47507988897791,
+ERASE, 47507988889600, 47507988889600,
+STORE, 47507988889600, 47507988897791,
+ERASE, 47507988897792, 47507988897792,
+STORE, 47507988897792, 47507988914175,
+STORE, 47507988897792, 47507988922367,
+STORE, 47507988922368, 47507989086207,
+ERASE, 47507988922368, 47507988922368,
+STORE, 47507988922368, 47507988934655,
+STORE, 47507988934656, 47507989086207,
+STORE, 47507989032960, 47507989086207,
+STORE, 47507988934656, 47507989032959,
+ERASE, 47507988934656, 47507988934656,
+STORE, 47507988934656, 47507989032959,
+STORE, 47507989078016, 47507989086207,
+STORE, 47507989032960, 47507989078015,
+ERASE, 47507989032960, 47507989032960,
+STORE, 47507989032960, 47507989086207,
+ERASE, 47507989032960, 47507989032960,
+STORE, 47507989032960, 47507989078015,
+STORE, 47507989078016, 47507989086207,
+ERASE, 47507989078016, 47507989078016,
+STORE, 47507989078016, 47507989086207,
+STORE, 47507989086208, 47507989684223,
+STORE, 47507989204992, 47507989684223,
+STORE, 47507989086208, 47507989204991,
+ERASE, 47507989204992, 47507989204992,
+STORE, 47507989204992, 47507989630975,
+STORE, 47507989630976, 47507989684223,
+STORE, 47507989520384, 47507989630975,
+STORE, 47507989204992, 47507989520383,
+ERASE, 47507989204992, 47507989204992,
+STORE, 47507989204992, 47507989520383,
+STORE, 47507989626880, 47507989630975,
+STORE, 47507989520384, 47507989626879,
+ERASE, 47507989520384, 47507989520384,
+STORE, 47507989520384, 47507989626879,
+ERASE, 47507989630976, 47507989630976,
+STORE, 47507989630976, 47507989684223,
+STORE, 47507989684224, 47507992735743,
+STORE, 47507990228992, 47507992735743,
+STORE, 47507989684224, 47507990228991,
+ERASE, 47507990228992, 47507990228992,
+STORE, 47507990228992, 47507992514559,
+STORE, 47507992514560, 47507992735743,
+STORE, 47507991924736, 47507992514559,
+STORE, 47507990228992, 47507991924735,
+ERASE, 47507990228992, 47507990228992,
+STORE, 47507990228992, 47507991924735,
+STORE, 47507992510464, 47507992514559,
+STORE, 47507991924736, 47507992510463,
+ERASE, 47507991924736, 47507991924736,
+STORE, 47507991924736, 47507992510463,
+STORE, 47507992719360, 47507992735743,
+STORE, 47507992514560, 47507992719359,
+ERASE, 47507992514560, 47507992514560,
+STORE, 47507992514560, 47507992719359,
+ERASE, 47507992719360, 47507992719360,
+STORE, 47507992719360, 47507992735743,
+STORE, 47507992735744, 47507992768511,
+ERASE, 47507992735744, 47507992735744,
+STORE, 47507992735744, 47507992743935,
+STORE, 47507992743936, 47507992768511,
+STORE, 47507992756224, 47507992768511,
+STORE, 47507992743936, 47507992756223,
+ERASE, 47507992743936, 47507992743936,
+STORE, 47507992743936, 47507992756223,
+STORE, 47507992760320, 47507992768511,
+STORE, 47507992756224, 47507992760319,
+ERASE, 47507992756224, 47507992756224,
+STORE, 47507992756224, 47507992768511,
+ERASE, 47507992756224, 47507992756224,
+STORE, 47507992756224, 47507992760319,
+STORE, 47507992760320, 47507992768511,
+ERASE, 47507992760320, 47507992760320,
+STORE, 47507992760320, 47507992768511,
+STORE, 47507992768512, 47507992805375,
+ERASE, 47507992768512, 47507992768512,
+STORE, 47507992768512, 47507992776703,
+STORE, 47507992776704, 47507992805375,
+STORE, 47507992793088, 47507992805375,
+STORE, 47507992776704, 47507992793087,
+ERASE, 47507992776704, 47507992776704,
+STORE, 47507992776704, 47507992793087,
+STORE, 47507992797184, 47507992805375,
+STORE, 47507992793088, 47507992797183,
+ERASE, 47507992793088, 47507992793088,
+STORE, 47507992793088, 47507992805375,
+ERASE, 47507992793088, 47507992793088,
+STORE, 47507992793088, 47507992797183,
+STORE, 47507992797184, 47507992805375,
+ERASE, 47507992797184, 47507992797184,
+STORE, 47507992797184, 47507992805375,
+STORE, 47507992805376, 47507993280511,
+ERASE, 47507992805376, 47507992805376,
+STORE, 47507992805376, 47507992813567,
+STORE, 47507992813568, 47507993280511,
+STORE, 47507993149440, 47507993280511,
+STORE, 47507992813568, 47507993149439,
+ERASE, 47507992813568, 47507992813568,
+STORE, 47507992813568, 47507993149439,
+STORE, 47507993272320, 47507993280511,
+STORE, 47507993149440, 47507993272319,
+ERASE, 47507993149440, 47507993149440,
+STORE, 47507993149440, 47507993280511,
+ERASE, 47507993149440, 47507993149440,
+STORE, 47507993149440, 47507993272319,
+STORE, 47507993272320, 47507993280511,
+ERASE, 47507993272320, 47507993272320,
+STORE, 47507993272320, 47507993280511,
+STORE, 47507993280512, 47507993288703,
+STORE, 47507993288704, 47507993309183,
+ERASE, 47507993288704, 47507993288704,
+STORE, 47507993288704, 47507993292799,
+STORE, 47507993292800, 47507993309183,
+STORE, 47507993296896, 47507993309183,
+STORE, 47507993292800, 47507993296895,
+ERASE, 47507993292800, 47507993292800,
+STORE, 47507993292800, 47507993296895,
+STORE, 47507993300992, 47507993309183,
+STORE, 47507993296896, 47507993300991,
+ERASE, 47507993296896, 47507993296896,
+STORE, 47507993296896, 47507993309183,
+ERASE, 47507993296896, 47507993296896,
+STORE, 47507993296896, 47507993300991,
+STORE, 47507993300992, 47507993309183,
+ERASE, 47507993300992, 47507993300992,
+STORE, 47507993300992, 47507993309183,
+STORE, 47507993309184, 47507993317375,
+ERASE, 47507985973248, 47507985973248,
+STORE, 47507985973248, 47507985989631,
+STORE, 47507985989632, 47507985997823,
+ERASE, 47507993300992, 47507993300992,
+STORE, 47507993300992, 47507993305087,
+STORE, 47507993305088, 47507993309183,
+ERASE, 47507988889600, 47507988889600,
+STORE, 47507988889600, 47507988893695,
+STORE, 47507988893696, 47507988897791,
+ERASE, 47507993272320, 47507993272320,
+STORE, 47507993272320, 47507993276415,
+STORE, 47507993276416, 47507993280511,
+ERASE, 47507992797184, 47507992797184,
+STORE, 47507992797184, 47507992801279,
+STORE, 47507992801280, 47507992805375,
+ERASE, 47507992760320, 47507992760320,
+STORE, 47507992760320, 47507992764415,
+STORE, 47507992764416, 47507992768511,
+ERASE, 47507992514560, 47507992514560,
+STORE, 47507992514560, 47507992711167,
+STORE, 47507992711168, 47507992719359,
+ERASE, 47507989630976, 47507989630976,
+STORE, 47507989630976, 47507989667839,
+STORE, 47507989667840, 47507989684223,
+ERASE, 47507989078016, 47507989078016,
+STORE, 47507989078016, 47507989082111,
+STORE, 47507989082112, 47507989086207,
+ERASE, 47507988762624, 47507988762624,
+STORE, 47507988762624, 47507988766719,
+STORE, 47507988766720, 47507988770815,
+ERASE, 47507986493440, 47507986493440,
+STORE, 47507986493440, 47507986513919,
+STORE, 47507986513920, 47507986518015,
+ERASE, 47507986161664, 47507986161664,
+STORE, 47507986161664, 47507986165759,
+STORE, 47507986165760, 47507986169855,
+ERASE, 47507986116608, 47507986116608,
+STORE, 47507986116608, 47507986120703,
+STORE, 47507986120704, 47507986124799,
+ERASE, 94386579570688, 94386579570688,
+STORE, 94386579570688, 94386579693567,
+STORE, 94386579693568, 94386579697663,
+ERASE, 140124810997760, 140124810997760,
+STORE, 140124810997760, 140124811001855,
+STORE, 140124811001856, 140124811005951,
+ERASE, 47507984158720, 47507984158720,
+STORE, 94386583982080, 94386584117247,
+STORE, 94386583982080, 94386584256511,
+ERASE, 94386583982080, 94386583982080,
+STORE, 94386583982080, 94386584223743,
+STORE, 94386584223744, 94386584256511,
+ERASE, 94386584223744, 94386584223744,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733763395584, 140737488351231,
+ERASE, 140733763395584, 140733763395584,
+STORE, 140733763395584, 140733763399679,
+STORE, 94011546472448, 94011547152383,
+ERASE, 94011546472448, 94011546472448,
+STORE, 94011546472448, 94011546537983,
+STORE, 94011546537984, 94011547152383,
+ERASE, 94011546537984, 94011546537984,
+STORE, 94011546537984, 94011546886143,
+STORE, 94011546886144, 94011547025407,
+STORE, 94011547025408, 94011547152383,
+STORE, 139757597949952, 139757598121983,
+ERASE, 139757597949952, 139757597949952,
+STORE, 139757597949952, 139757597954047,
+STORE, 139757597954048, 139757598121983,
+ERASE, 139757597954048, 139757597954048,
+STORE, 139757597954048, 139757598076927,
+STORE, 139757598076928, 139757598109695,
+STORE, 139757598109696, 139757598117887,
+STORE, 139757598117888, 139757598121983,
+STORE, 140733763596288, 140733763600383,
+STORE, 140733763584000, 140733763596287,
+STORE, 47875197046784, 47875197054975,
+STORE, 47875197054976, 47875197063167,
+STORE, 47875197063168, 47875198902271,
+STORE, 47875197202432, 47875198902271,
+STORE, 47875197063168, 47875197202431,
+ERASE, 47875197202432, 47875197202432,
+STORE, 47875197202432, 47875198861311,
+STORE, 47875198861312, 47875198902271,
+STORE, 47875198545920, 47875198861311,
+STORE, 47875197202432, 47875198545919,
+ERASE, 47875197202432, 47875197202432,
+STORE, 47875197202432, 47875198545919,
+STORE, 47875198857216, 47875198861311,
+STORE, 47875198545920, 47875198857215,
+ERASE, 47875198545920, 47875198545920,
+STORE, 47875198545920, 47875198857215,
+STORE, 47875198885888, 47875198902271,
+STORE, 47875198861312, 47875198885887,
+ERASE, 47875198861312, 47875198861312,
+STORE, 47875198861312, 47875198885887,
+ERASE, 47875198885888, 47875198885888,
+STORE, 47875198885888, 47875198902271,
+STORE, 47875198902272, 47875199012863,
+STORE, 47875198918656, 47875199012863,
+STORE, 47875198902272, 47875198918655,
+ERASE, 47875198918656, 47875198918656,
+STORE, 47875198918656, 47875199004671,
+STORE, 47875199004672, 47875199012863,
+STORE, 47875198980096, 47875199004671,
+STORE, 47875198918656, 47875198980095,
+ERASE, 47875198918656, 47875198918656,
+STORE, 47875198918656, 47875198980095,
+STORE, 47875199000576, 47875199004671,
+STORE, 47875198980096, 47875199000575,
+ERASE, 47875198980096, 47875198980096,
+STORE, 47875198980096, 47875199000575,
+ERASE, 47875199004672, 47875199004672,
+STORE, 47875199004672, 47875199012863,
+STORE, 47875199012864, 47875199057919,
+ERASE, 47875199012864, 47875199012864,
+STORE, 47875199012864, 47875199021055,
+STORE, 47875199021056, 47875199057919,
+STORE, 47875199041536, 47875199057919,
+STORE, 47875199021056, 47875199041535,
+ERASE, 47875199021056, 47875199021056,
+STORE, 47875199021056, 47875199041535,
+STORE, 47875199049728, 47875199057919,
+STORE, 47875199041536, 47875199049727,
+ERASE, 47875199041536, 47875199041536,
+STORE, 47875199041536, 47875199057919,
+ERASE, 47875199041536, 47875199041536,
+STORE, 47875199041536, 47875199049727,
+STORE, 47875199049728, 47875199057919,
+ERASE, 47875199049728, 47875199049728,
+STORE, 47875199049728, 47875199057919,
+STORE, 47875199057920, 47875199406079,
+STORE, 47875199098880, 47875199406079,
+STORE, 47875199057920, 47875199098879,
+ERASE, 47875199098880, 47875199098880,
+STORE, 47875199098880, 47875199381503,
+STORE, 47875199381504, 47875199406079,
+STORE, 47875199311872, 47875199381503,
+STORE, 47875199098880, 47875199311871,
+ERASE, 47875199098880, 47875199098880,
+STORE, 47875199098880, 47875199311871,
+STORE, 47875199377408, 47875199381503,
+STORE, 47875199311872, 47875199377407,
+ERASE, 47875199311872, 47875199311872,
+STORE, 47875199311872, 47875199377407,
+ERASE, 47875199381504, 47875199381504,
+STORE, 47875199381504, 47875199406079,
+STORE, 47875199406080, 47875201667071,
+STORE, 47875199557632, 47875201667071,
+STORE, 47875199406080, 47875199557631,
+ERASE, 47875199557632, 47875199557632,
+STORE, 47875199557632, 47875201650687,
+STORE, 47875201650688, 47875201667071,
+STORE, 47875201658880, 47875201667071,
+STORE, 47875201650688, 47875201658879,
+ERASE, 47875201650688, 47875201650688,
+STORE, 47875201650688, 47875201658879,
+ERASE, 47875201658880, 47875201658880,
+STORE, 47875201658880, 47875201667071,
+STORE, 47875201667072, 47875201802239,
+ERASE, 47875201667072, 47875201667072,
+STORE, 47875201667072, 47875201691647,
+STORE, 47875201691648, 47875201802239,
+STORE, 47875201753088, 47875201802239,
+STORE, 47875201691648, 47875201753087,
+ERASE, 47875201691648, 47875201691648,
+STORE, 47875201691648, 47875201753087,
+STORE, 47875201777664, 47875201802239,
+STORE, 47875201753088, 47875201777663,
+ERASE, 47875201753088, 47875201753088,
+STORE, 47875201753088, 47875201802239,
+ERASE, 47875201753088, 47875201753088,
+STORE, 47875201753088, 47875201777663,
+STORE, 47875201777664, 47875201802239,
+STORE, 47875201785856, 47875201802239,
+STORE, 47875201777664, 47875201785855,
+ERASE, 47875201777664, 47875201777664,
+STORE, 47875201777664, 47875201785855,
+ERASE, 47875201785856, 47875201785856,
+STORE, 47875201785856, 47875201802239,
+STORE, 47875201785856, 47875201810431,
+STORE, 47875201810432, 47875201974271,
+ERASE, 47875201810432, 47875201810432,
+STORE, 47875201810432, 47875201822719,
+STORE, 47875201822720, 47875201974271,
+STORE, 47875201921024, 47875201974271,
+STORE, 47875201822720, 47875201921023,
+ERASE, 47875201822720, 47875201822720,
+STORE, 47875201822720, 47875201921023,
+STORE, 47875201966080, 47875201974271,
+STORE, 47875201921024, 47875201966079,
+ERASE, 47875201921024, 47875201921024,
+STORE, 47875201921024, 47875201974271,
+ERASE, 47875201921024, 47875201921024,
+STORE, 47875201921024, 47875201966079,
+STORE, 47875201966080, 47875201974271,
+ERASE, 47875201966080, 47875201966080,
+STORE, 47875201966080, 47875201974271,
+STORE, 47875201974272, 47875202572287,
+STORE, 47875202093056, 47875202572287,
+STORE, 47875201974272, 47875202093055,
+ERASE, 47875202093056, 47875202093056,
+STORE, 47875202093056, 47875202519039,
+STORE, 47875202519040, 47875202572287,
+STORE, 47875202408448, 47875202519039,
+STORE, 47875202093056, 47875202408447,
+ERASE, 47875202093056, 47875202093056,
+STORE, 47875202093056, 47875202408447,
+STORE, 47875202514944, 47875202519039,
+STORE, 47875202408448, 47875202514943,
+ERASE, 47875202408448, 47875202408448,
+STORE, 47875202408448, 47875202514943,
+ERASE, 47875202519040, 47875202519040,
+STORE, 47875202519040, 47875202572287,
+STORE, 47875202572288, 47875205623807,
+STORE, 47875203117056, 47875205623807,
+STORE, 47875202572288, 47875203117055,
+ERASE, 47875203117056, 47875203117056,
+STORE, 47875203117056, 47875205402623,
+STORE, 47875205402624, 47875205623807,
+STORE, 47875204812800, 47875205402623,
+STORE, 47875203117056, 47875204812799,
+ERASE, 47875203117056, 47875203117056,
+STORE, 47875203117056, 47875204812799,
+STORE, 47875205398528, 47875205402623,
+STORE, 47875204812800, 47875205398527,
+ERASE, 47875204812800, 47875204812800,
+STORE, 47875204812800, 47875205398527,
+STORE, 47875205607424, 47875205623807,
+STORE, 47875205402624, 47875205607423,
+ERASE, 47875205402624, 47875205402624,
+STORE, 47875205402624, 47875205607423,
+ERASE, 47875205607424, 47875205607424,
+STORE, 47875205607424, 47875205623807,
+STORE, 47875205623808, 47875205656575,
+ERASE, 47875205623808, 47875205623808,
+STORE, 47875205623808, 47875205631999,
+STORE, 47875205632000, 47875205656575,
+STORE, 47875205644288, 47875205656575,
+STORE, 47875205632000, 47875205644287,
+ERASE, 47875205632000, 47875205632000,
+STORE, 47875205632000, 47875205644287,
+STORE, 47875205648384, 47875205656575,
+STORE, 47875205644288, 47875205648383,
+ERASE, 47875205644288, 47875205644288,
+STORE, 47875205644288, 47875205656575,
+ERASE, 47875205644288, 47875205644288,
+STORE, 47875205644288, 47875205648383,
+STORE, 47875205648384, 47875205656575,
+ERASE, 47875205648384, 47875205648384,
+STORE, 47875205648384, 47875205656575,
+STORE, 47875205656576, 47875205693439,
+ERASE, 47875205656576, 47875205656576,
+STORE, 47875205656576, 47875205664767,
+STORE, 47875205664768, 47875205693439,
+STORE, 47875205681152, 47875205693439,
+STORE, 47875205664768, 47875205681151,
+ERASE, 47875205664768, 47875205664768,
+STORE, 47875205664768, 47875205681151,
+STORE, 47875205685248, 47875205693439,
+STORE, 47875205681152, 47875205685247,
+ERASE, 47875205681152, 47875205681152,
+STORE, 47875205681152, 47875205693439,
+ERASE, 47875205681152, 47875205681152,
+STORE, 47875205681152, 47875205685247,
+STORE, 47875205685248, 47875205693439,
+ERASE, 47875205685248, 47875205685248,
+STORE, 47875205685248, 47875205693439,
+STORE, 47875205693440, 47875206168575,
+ERASE, 47875205693440, 47875205693440,
+STORE, 47875205693440, 47875205701631,
+STORE, 47875205701632, 47875206168575,
+STORE, 47875206037504, 47875206168575,
+STORE, 47875205701632, 47875206037503,
+ERASE, 47875205701632, 47875205701632,
+STORE, 47875205701632, 47875206037503,
+STORE, 47875206160384, 47875206168575,
+STORE, 47875206037504, 47875206160383,
+ERASE, 47875206037504, 47875206037504,
+STORE, 47875206037504, 47875206168575,
+ERASE, 47875206037504, 47875206037504,
+STORE, 47875206037504, 47875206160383,
+STORE, 47875206160384, 47875206168575,
+ERASE, 47875206160384, 47875206160384,
+STORE, 47875206160384, 47875206168575,
+STORE, 47875206168576, 47875206176767,
+STORE, 47875206176768, 47875206197247,
+ERASE, 47875206176768, 47875206176768,
+STORE, 47875206176768, 47875206180863,
+STORE, 47875206180864, 47875206197247,
+STORE, 47875206184960, 47875206197247,
+STORE, 47875206180864, 47875206184959,
+ERASE, 47875206180864, 47875206180864,
+STORE, 47875206180864, 47875206184959,
+STORE, 47875206189056, 47875206197247,
+STORE, 47875206184960, 47875206189055,
+ERASE, 47875206184960, 47875206184960,
+STORE, 47875206184960, 47875206197247,
+ERASE, 47875206184960, 47875206184960,
+STORE, 47875206184960, 47875206189055,
+STORE, 47875206189056, 47875206197247,
+ERASE, 47875206189056, 47875206189056,
+STORE, 47875206189056, 47875206197247,
+STORE, 47875206197248, 47875206205439,
+ERASE, 47875198861312, 47875198861312,
+STORE, 47875198861312, 47875198877695,
+STORE, 47875198877696, 47875198885887,
+ERASE, 47875206189056, 47875206189056,
+STORE, 47875206189056, 47875206193151,
+STORE, 47875206193152, 47875206197247,
+ERASE, 47875201777664, 47875201777664,
+STORE, 47875201777664, 47875201781759,
+STORE, 47875201781760, 47875201785855,
+ERASE, 47875206160384, 47875206160384,
+STORE, 47875206160384, 47875206164479,
+STORE, 47875206164480, 47875206168575,
+ERASE, 47875205685248, 47875205685248,
+STORE, 47875205685248, 47875205689343,
+STORE, 47875205689344, 47875205693439,
+ERASE, 47875205648384, 47875205648384,
+STORE, 47875205648384, 47875205652479,
+STORE, 47875205652480, 47875205656575,
+ERASE, 47875205402624, 47875205402624,
+STORE, 47875205402624, 47875205599231,
+STORE, 47875205599232, 47875205607423,
+ERASE, 47875202519040, 47875202519040,
+STORE, 47875202519040, 47875202555903,
+STORE, 47875202555904, 47875202572287,
+ERASE, 47875201966080, 47875201966080,
+STORE, 47875201966080, 47875201970175,
+STORE, 47875201970176, 47875201974271,
+ERASE, 47875201650688, 47875201650688,
+STORE, 47875201650688, 47875201654783,
+STORE, 47875201654784, 47875201658879,
+ERASE, 47875199381504, 47875199381504,
+STORE, 47875199381504, 47875199401983,
+STORE, 47875199401984, 47875199406079,
+ERASE, 47875199049728, 47875199049728,
+STORE, 47875199049728, 47875199053823,
+STORE, 47875199053824, 47875199057919,
+ERASE, 47875199004672, 47875199004672,
+STORE, 47875199004672, 47875199008767,
+STORE, 47875199008768, 47875199012863,
+ERASE, 94011547025408, 94011547025408,
+STORE, 94011547025408, 94011547148287,
+STORE, 94011547148288, 94011547152383,
+ERASE, 139757598109696, 139757598109696,
+STORE, 139757598109696, 139757598113791,
+STORE, 139757598113792, 139757598117887,
+ERASE, 47875197046784, 47875197046784,
+STORE, 94011557584896, 94011557720063,
+STORE, 94011557584896, 94011557855231,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557851135,
+STORE, 94011557851136, 94011557855231,
+ERASE, 94011557851136, 94011557851136,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557847039,
+STORE, 94011557847040, 94011557851135,
+ERASE, 94011557847040, 94011557847040,
+STORE, 94011557584896, 94011557982207,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557978111,
+STORE, 94011557978112, 94011557982207,
+ERASE, 94011557978112, 94011557978112,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557974015,
+STORE, 94011557974016, 94011557978111,
+ERASE, 94011557974016, 94011557974016,
+STORE, 140737488347136, 140737488351231,
+STORE, 140734130360320, 140737488351231,
+ERASE, 140734130360320, 140734130360320,
+STORE, 140734130360320, 140734130364415,
+STORE, 94641232105472, 94641232785407,
+ERASE, 94641232105472, 94641232105472,
+STORE, 94641232105472, 94641232171007,
+STORE, 94641232171008, 94641232785407,
+ERASE, 94641232171008, 94641232171008,
+STORE, 94641232171008, 94641232519167,
+STORE, 94641232519168, 94641232658431,
+STORE, 94641232658432, 94641232785407,
+STORE, 139726599516160, 139726599688191,
+ERASE, 139726599516160, 139726599516160,
+STORE, 139726599516160, 139726599520255,
+STORE, 139726599520256, 139726599688191,
+ERASE, 139726599520256, 139726599520256,
+STORE, 139726599520256, 139726599643135,
+STORE, 139726599643136, 139726599675903,
+STORE, 139726599675904, 139726599684095,
+STORE, 139726599684096, 139726599688191,
+STORE, 140734130446336, 140734130450431,
+STORE, 140734130434048, 140734130446335,
+STORE, 47906195480576, 47906195488767,
+STORE, 47906195488768, 47906195496959,
+STORE, 47906195496960, 47906197336063,
+STORE, 47906195636224, 47906197336063,
+STORE, 47906195496960, 47906195636223,
+ERASE, 47906195636224, 47906195636224,
+STORE, 47906195636224, 47906197295103,
+STORE, 47906197295104, 47906197336063,
+STORE, 47906196979712, 47906197295103,
+STORE, 47906195636224, 47906196979711,
+ERASE, 47906195636224, 47906195636224,
+STORE, 47906195636224, 47906196979711,
+STORE, 47906197291008, 47906197295103,
+STORE, 47906196979712, 47906197291007,
+ERASE, 47906196979712, 47906196979712,
+STORE, 47906196979712, 47906197291007,
+STORE, 47906197319680, 47906197336063,
+STORE, 47906197295104, 47906197319679,
+ERASE, 47906197295104, 47906197295104,
+STORE, 47906197295104, 47906197319679,
+ERASE, 47906197319680, 47906197319680,
+STORE, 47906197319680, 47906197336063,
+STORE, 47906197336064, 47906197446655,
+STORE, 47906197352448, 47906197446655,
+STORE, 47906197336064, 47906197352447,
+ERASE, 47906197352448, 47906197352448,
+STORE, 47906197352448, 47906197438463,
+STORE, 47906197438464, 47906197446655,
+STORE, 47906197413888, 47906197438463,
+STORE, 47906197352448, 47906197413887,
+ERASE, 47906197352448, 47906197352448,
+STORE, 47906197352448, 47906197413887,
+STORE, 47906197434368, 47906197438463,
+STORE, 47906197413888, 47906197434367,
+ERASE, 47906197413888, 47906197413888,
+STORE, 47906197413888, 47906197434367,
+ERASE, 47906197438464, 47906197438464,
+STORE, 47906197438464, 47906197446655,
+STORE, 47906197446656, 47906197491711,
+ERASE, 47906197446656, 47906197446656,
+STORE, 47906197446656, 47906197454847,
+STORE, 47906197454848, 47906197491711,
+STORE, 47906197475328, 47906197491711,
+STORE, 47906197454848, 47906197475327,
+ERASE, 47906197454848, 47906197454848,
+STORE, 47906197454848, 47906197475327,
+STORE, 47906197483520, 47906197491711,
+STORE, 47906197475328, 47906197483519,
+ERASE, 47906197475328, 47906197475328,
+STORE, 47906197475328, 47906197491711,
+ERASE, 47906197475328, 47906197475328,
+STORE, 47906197475328, 47906197483519,
+STORE, 47906197483520, 47906197491711,
+ERASE, 47906197483520, 47906197483520,
+STORE, 47906197483520, 47906197491711,
+STORE, 47906197491712, 47906197839871,
+STORE, 47906197532672, 47906197839871,
+STORE, 47906197491712, 47906197532671,
+ERASE, 47906197532672, 47906197532672,
+STORE, 47906197532672, 47906197815295,
+STORE, 47906197815296, 47906197839871,
+STORE, 47906197745664, 47906197815295,
+STORE, 47906197532672, 47906197745663,
+ERASE, 47906197532672, 47906197532672,
+STORE, 47906197532672, 47906197745663,
+STORE, 47906197811200, 47906197815295,
+STORE, 47906197745664, 47906197811199,
+ERASE, 47906197745664, 47906197745664,
+STORE, 47906197745664, 47906197811199,
+ERASE, 47906197815296, 47906197815296,
+STORE, 47906197815296, 47906197839871,
+STORE, 47906197839872, 47906200100863,
+STORE, 47906197991424, 47906200100863,
+STORE, 47906197839872, 47906197991423,
+ERASE, 47906197991424, 47906197991424,
+STORE, 47906197991424, 47906200084479,
+STORE, 47906200084480, 47906200100863,
+STORE, 47906200092672, 47906200100863,
+STORE, 47906200084480, 47906200092671,
+ERASE, 47906200084480, 47906200084480,
+STORE, 47906200084480, 47906200092671,
+ERASE, 47906200092672, 47906200092672,
+STORE, 47906200092672, 47906200100863,
+STORE, 47906200100864, 47906200236031,
+ERASE, 47906200100864, 47906200100864,
+STORE, 47906200100864, 47906200125439,
+STORE, 47906200125440, 47906200236031,
+STORE, 47906200186880, 47906200236031,
+STORE, 47906200125440, 47906200186879,
+ERASE, 47906200125440, 47906200125440,
+STORE, 47906200125440, 47906200186879,
+STORE, 47906200211456, 47906200236031,
+STORE, 47906200186880, 47906200211455,
+ERASE, 47906200186880, 47906200186880,
+STORE, 47906200186880, 47906200236031,
+ERASE, 47906200186880, 47906200186880,
+STORE, 47906200186880, 47906200211455,
+STORE, 47906200211456, 47906200236031,
+STORE, 47906200219648, 47906200236031,
+STORE, 47906200211456, 47906200219647,
+ERASE, 47906200211456, 47906200211456,
+STORE, 47906200211456, 47906200219647,
+ERASE, 47906200219648, 47906200219648,
+STORE, 47906200219648, 47906200236031,
+STORE, 47906200219648, 47906200244223,
+STORE, 47906200244224, 47906200408063,
+ERASE, 47906200244224, 47906200244224,
+STORE, 47906200244224, 47906200256511,
+STORE, 47906200256512, 47906200408063,
+STORE, 47906200354816, 47906200408063,
+STORE, 47906200256512, 47906200354815,
+ERASE, 47906200256512, 47906200256512,
+STORE, 47906200256512, 47906200354815,
+STORE, 47906200399872, 47906200408063,
+STORE, 47906200354816, 47906200399871,
+ERASE, 47906200354816, 47906200354816,
+STORE, 47906200354816, 47906200408063,
+ERASE, 47906200354816, 47906200354816,
+STORE, 47906200354816, 47906200399871,
+STORE, 47906200399872, 47906200408063,
+ERASE, 47906200399872, 47906200399872,
+STORE, 47906200399872, 47906200408063,
+STORE, 47906200408064, 47906201006079,
+STORE, 47906200526848, 47906201006079,
+STORE, 47906200408064, 47906200526847,
+ERASE, 47906200526848, 47906200526848,
+STORE, 47906200526848, 47906200952831,
+STORE, 47906200952832, 47906201006079,
+STORE, 47906200842240, 47906200952831,
+STORE, 47906200526848, 47906200842239,
+ERASE, 47906200526848, 47906200526848,
+STORE, 47906200526848, 47906200842239,
+STORE, 47906200948736, 47906200952831,
+STORE, 47906200842240, 47906200948735,
+ERASE, 47906200842240, 47906200842240,
+STORE, 47906200842240, 47906200948735,
+ERASE, 47906200952832, 47906200952832,
+STORE, 47906200952832, 47906201006079,
+STORE, 47906201006080, 47906204057599,
+STORE, 47906201550848, 47906204057599,
+STORE, 47906201006080, 47906201550847,
+ERASE, 47906201550848, 47906201550848,
+STORE, 47906201550848, 47906203836415,
+STORE, 47906203836416, 47906204057599,
+STORE, 47906203246592, 47906203836415,
+STORE, 47906201550848, 47906203246591,
+ERASE, 47906201550848, 47906201550848,
+STORE, 47906201550848, 47906203246591,
+STORE, 47906203832320, 47906203836415,
+STORE, 47906203246592, 47906203832319,
+ERASE, 47906203246592, 47906203246592,
+STORE, 47906203246592, 47906203832319,
+STORE, 47906204041216, 47906204057599,
+STORE, 47906203836416, 47906204041215,
+ERASE, 47906203836416, 47906203836416,
+STORE, 47906203836416, 47906204041215,
+ERASE, 47906204041216, 47906204041216,
+STORE, 47906204041216, 47906204057599,
+STORE, 47906204057600, 47906204090367,
+ERASE, 47906204057600, 47906204057600,
+STORE, 47906204057600, 47906204065791,
+STORE, 47906204065792, 47906204090367,
+STORE, 47906204078080, 47906204090367,
+STORE, 47906204065792, 47906204078079,
+ERASE, 47906204065792, 47906204065792,
+STORE, 47906204065792, 47906204078079,
+STORE, 47906204082176, 47906204090367,
+STORE, 47906204078080, 47906204082175,
+ERASE, 47906204078080, 47906204078080,
+STORE, 47906204078080, 47906204090367,
+ERASE, 47906204078080, 47906204078080,
+STORE, 47906204078080, 47906204082175,
+STORE, 47906204082176, 47906204090367,
+ERASE, 47906204082176, 47906204082176,
+STORE, 47906204082176, 47906204090367,
+STORE, 47906204090368, 47906204127231,
+ERASE, 47906204090368, 47906204090368,
+STORE, 47906204090368, 47906204098559,
+STORE, 47906204098560, 47906204127231,
+STORE, 47906204114944, 47906204127231,
+STORE, 47906204098560, 47906204114943,
+ERASE, 47906204098560, 47906204098560,
+STORE, 47906204098560, 47906204114943,
+STORE, 47906204119040, 47906204127231,
+STORE, 47906204114944, 47906204119039,
+ERASE, 47906204114944, 47906204114944,
+STORE, 47906204114944, 47906204127231,
+ERASE, 47906204114944, 47906204114944,
+STORE, 47906204114944, 47906204119039,
+STORE, 47906204119040, 47906204127231,
+ERASE, 47906204119040, 47906204119040,
+STORE, 47906204119040, 47906204127231,
+STORE, 47906204127232, 47906204602367,
+ERASE, 47906204127232, 47906204127232,
+STORE, 47906204127232, 47906204135423,
+STORE, 47906204135424, 47906204602367,
+STORE, 47906204471296, 47906204602367,
+STORE, 47906204135424, 47906204471295,
+ERASE, 47906204135424, 47906204135424,
+STORE, 47906204135424, 47906204471295,
+STORE, 47906204594176, 47906204602367,
+STORE, 47906204471296, 47906204594175,
+ERASE, 47906204471296, 47906204471296,
+STORE, 47906204471296, 47906204602367,
+ERASE, 47906204471296, 47906204471296,
+STORE, 47906204471296, 47906204594175,
+STORE, 47906204594176, 47906204602367,
+ERASE, 47906204594176, 47906204594176,
+STORE, 47906204594176, 47906204602367,
+STORE, 47906204602368, 47906204610559,
+STORE, 47906204610560, 47906204631039,
+ERASE, 47906204610560, 47906204610560,
+STORE, 47906204610560, 47906204614655,
+STORE, 47906204614656, 47906204631039,
+STORE, 47906204618752, 47906204631039,
+STORE, 47906204614656, 47906204618751,
+ERASE, 47906204614656, 47906204614656,
+STORE, 47906204614656, 47906204618751,
+STORE, 47906204622848, 47906204631039,
+STORE, 47906204618752, 47906204622847,
+ERASE, 47906204618752, 47906204618752,
+STORE, 47906204618752, 47906204631039,
+ERASE, 47906204618752, 47906204618752,
+STORE, 47906204618752, 47906204622847,
+STORE, 47906204622848, 47906204631039,
+ERASE, 47906204622848, 47906204622848,
+STORE, 47906204622848, 47906204631039,
+STORE, 47906204631040, 47906204639231,
+ERASE, 47906197295104, 47906197295104,
+STORE, 47906197295104, 47906197311487,
+STORE, 47906197311488, 47906197319679,
+ERASE, 47906204622848, 47906204622848,
+STORE, 47906204622848, 47906204626943,
+STORE, 47906204626944, 47906204631039,
+ERASE, 47906200211456, 47906200211456,
+STORE, 47906200211456, 47906200215551,
+STORE, 47906200215552, 47906200219647,
+ERASE, 47906204594176, 47906204594176,
+STORE, 47906204594176, 47906204598271,
+STORE, 47906204598272, 47906204602367,
+ERASE, 47906204119040, 47906204119040,
+STORE, 47906204119040, 47906204123135,
+STORE, 47906204123136, 47906204127231,
+ERASE, 47906204082176, 47906204082176,
+STORE, 47906204082176, 47906204086271,
+STORE, 47906204086272, 47906204090367,
+ERASE, 47906203836416, 47906203836416,
+STORE, 47906203836416, 47906204033023,
+STORE, 47906204033024, 47906204041215,
+ERASE, 47906200952832, 47906200952832,
+STORE, 47906200952832, 47906200989695,
+STORE, 47906200989696, 47906201006079,
+ERASE, 47906200399872, 47906200399872,
+STORE, 47906200399872, 47906200403967,
+STORE, 47906200403968, 47906200408063,
+ERASE, 47906200084480, 47906200084480,
+STORE, 47906200084480, 47906200088575,
+STORE, 47906200088576, 47906200092671,
+ERASE, 47906197815296, 47906197815296,
+STORE, 47906197815296, 47906197835775,
+STORE, 47906197835776, 47906197839871,
+ERASE, 47906197483520, 47906197483520,
+STORE, 47906197483520, 47906197487615,
+STORE, 47906197487616, 47906197491711,
+ERASE, 47906197438464, 47906197438464,
+STORE, 47906197438464, 47906197442559,
+STORE, 47906197442560, 47906197446655,
+ERASE, 94641232658432, 94641232658432,
+STORE, 94641232658432, 94641232781311,
+STORE, 94641232781312, 94641232785407,
+ERASE, 139726599675904, 139726599675904,
+STORE, 139726599675904, 139726599679999,
+STORE, 139726599680000, 139726599684095,
+ERASE, 47906195480576, 47906195480576,
+STORE, 94641242615808, 94641242750975,
+ };
+
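+	/*
+	 * set10: STORE/ERASE opcodes, each followed by an inclusive
+	 * [start, last] range, replayed in order against the tree.  The
+	 * addresses appear to be recorded from real mmap/munmap activity,
+	 * so the repeated overwrites and partial erases exercise range
+	 * splitting and coalescing of existing entries.
+	 */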
+ unsigned long set10[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140736427839488, 140737488351231,
+ERASE, 140736427839488, 140736427839488,
+STORE, 140736427839488, 140736427843583,
+STORE, 94071213395968, 94071213567999,
+ERASE, 94071213395968, 94071213395968,
+STORE, 94071213395968, 94071213412351,
+STORE, 94071213412352, 94071213567999,
+ERASE, 94071213412352, 94071213412352,
+STORE, 94071213412352, 94071213514751,
+STORE, 94071213514752, 94071213555711,
+STORE, 94071213555712, 94071213567999,
+STORE, 139968410644480, 139968410816511,
+ERASE, 139968410644480, 139968410644480,
+STORE, 139968410644480, 139968410648575,
+STORE, 139968410648576, 139968410816511,
+ERASE, 139968410648576, 139968410648576,
+STORE, 139968410648576, 139968410771455,
+STORE, 139968410771456, 139968410804223,
+STORE, 139968410804224, 139968410812415,
+STORE, 139968410812416, 139968410816511,
+STORE, 140736429277184, 140736429281279,
+STORE, 140736429264896, 140736429277183,
+STORE, 47664384352256, 47664384360447,
+STORE, 47664384360448, 47664384368639,
+STORE, 47664384368640, 47664384532479,
+ERASE, 47664384368640, 47664384368640,
+STORE, 47664384368640, 47664384380927,
+STORE, 47664384380928, 47664384532479,
+STORE, 47664384479232, 47664384532479,
+STORE, 47664384380928, 47664384479231,
+ERASE, 47664384380928, 47664384380928,
+STORE, 47664384380928, 47664384479231,
+STORE, 47664384524288, 47664384532479,
+STORE, 47664384479232, 47664384524287,
+ERASE, 47664384479232, 47664384479232,
+STORE, 47664384479232, 47664384532479,
+ERASE, 47664384479232, 47664384479232,
+STORE, 47664384479232, 47664384524287,
+STORE, 47664384524288, 47664384532479,
+ERASE, 47664384524288, 47664384524288,
+STORE, 47664384524288, 47664384532479,
+STORE, 47664384532480, 47664387583999,
+STORE, 47664385077248, 47664387583999,
+STORE, 47664384532480, 47664385077247,
+ERASE, 47664385077248, 47664385077248,
+STORE, 47664385077248, 47664387362815,
+STORE, 47664387362816, 47664387583999,
+STORE, 47664386772992, 47664387362815,
+STORE, 47664385077248, 47664386772991,
+ERASE, 47664385077248, 47664385077248,
+STORE, 47664385077248, 47664386772991,
+STORE, 47664387358720, 47664387362815,
+STORE, 47664386772992, 47664387358719,
+ERASE, 47664386772992, 47664386772992,
+STORE, 47664386772992, 47664387358719,
+STORE, 47664387567616, 47664387583999,
+STORE, 47664387362816, 47664387567615,
+ERASE, 47664387362816, 47664387362816,
+STORE, 47664387362816, 47664387567615,
+ERASE, 47664387567616, 47664387567616,
+STORE, 47664387567616, 47664387583999,
+STORE, 47664387584000, 47664389423103,
+STORE, 47664387723264, 47664389423103,
+STORE, 47664387584000, 47664387723263,
+ERASE, 47664387723264, 47664387723264,
+STORE, 47664387723264, 47664389382143,
+STORE, 47664389382144, 47664389423103,
+STORE, 47664389066752, 47664389382143,
+STORE, 47664387723264, 47664389066751,
+ERASE, 47664387723264, 47664387723264,
+STORE, 47664387723264, 47664389066751,
+STORE, 47664389378048, 47664389382143,
+STORE, 47664389066752, 47664389378047,
+ERASE, 47664389066752, 47664389066752,
+STORE, 47664389066752, 47664389378047,
+STORE, 47664389406720, 47664389423103,
+STORE, 47664389382144, 47664389406719,
+ERASE, 47664389382144, 47664389382144,
+STORE, 47664389382144, 47664389406719,
+ERASE, 47664389406720, 47664389406720,
+STORE, 47664389406720, 47664389423103,
+STORE, 47664389423104, 47664389558271,
+ERASE, 47664389423104, 47664389423104,
+STORE, 47664389423104, 47664389447679,
+STORE, 47664389447680, 47664389558271,
+STORE, 47664389509120, 47664389558271,
+STORE, 47664389447680, 47664389509119,
+ERASE, 47664389447680, 47664389447680,
+STORE, 47664389447680, 47664389509119,
+STORE, 47664389533696, 47664389558271,
+STORE, 47664389509120, 47664389533695,
+ERASE, 47664389509120, 47664389509120,
+STORE, 47664389509120, 47664389558271,
+ERASE, 47664389509120, 47664389509120,
+STORE, 47664389509120, 47664389533695,
+STORE, 47664389533696, 47664389558271,
+STORE, 47664389541888, 47664389558271,
+STORE, 47664389533696, 47664389541887,
+ERASE, 47664389533696, 47664389533696,
+STORE, 47664389533696, 47664389541887,
+ERASE, 47664389541888, 47664389541888,
+STORE, 47664389541888, 47664389558271,
+STORE, 47664389558272, 47664389578751,
+ERASE, 47664389558272, 47664389558272,
+STORE, 47664389558272, 47664389562367,
+STORE, 47664389562368, 47664389578751,
+STORE, 47664389566464, 47664389578751,
+STORE, 47664389562368, 47664389566463,
+ERASE, 47664389562368, 47664389562368,
+STORE, 47664389562368, 47664389566463,
+STORE, 47664389570560, 47664389578751,
+STORE, 47664389566464, 47664389570559,
+ERASE, 47664389566464, 47664389566464,
+STORE, 47664389566464, 47664389578751,
+ERASE, 47664389566464, 47664389566464,
+STORE, 47664389566464, 47664389570559,
+STORE, 47664389570560, 47664389578751,
+ERASE, 47664389570560, 47664389570560,
+STORE, 47664389570560, 47664389578751,
+STORE, 47664389578752, 47664389586943,
+ERASE, 47664389382144, 47664389382144,
+STORE, 47664389382144, 47664389398527,
+STORE, 47664389398528, 47664389406719,
+ERASE, 47664389570560, 47664389570560,
+STORE, 47664389570560, 47664389574655,
+STORE, 47664389574656, 47664389578751,
+ERASE, 47664389533696, 47664389533696,
+STORE, 47664389533696, 47664389537791,
+STORE, 47664389537792, 47664389541887,
+ERASE, 47664387362816, 47664387362816,
+STORE, 47664387362816, 47664387559423,
+STORE, 47664387559424, 47664387567615,
+ERASE, 47664384524288, 47664384524288,
+STORE, 47664384524288, 47664384528383,
+STORE, 47664384528384, 47664384532479,
+ERASE, 94071213555712, 94071213555712,
+STORE, 94071213555712, 94071213563903,
+STORE, 94071213563904, 94071213567999,
+ERASE, 139968410804224, 139968410804224,
+STORE, 139968410804224, 139968410808319,
+STORE, 139968410808320, 139968410812415,
+ERASE, 47664384352256, 47664384352256,
+STORE, 94071244402688, 94071244537855,
+STORE, 140737488347136, 140737488351231,
+STORE, 140728271503360, 140737488351231,
+ERASE, 140728271503360, 140728271503360,
+STORE, 140728271503360, 140728271507455,
+STORE, 94410361982976, 94410362155007,
+ERASE, 94410361982976, 94410361982976,
+STORE, 94410361982976, 94410361999359,
+STORE, 94410361999360, 94410362155007,
+ERASE, 94410361999360, 94410361999360,
+STORE, 94410361999360, 94410362101759,
+STORE, 94410362101760, 94410362142719,
+STORE, 94410362142720, 94410362155007,
+STORE, 140351953997824, 140351954169855,
+ERASE, 140351953997824, 140351953997824,
+STORE, 140351953997824, 140351954001919,
+STORE, 140351954001920, 140351954169855,
+ERASE, 140351954001920, 140351954001920,
+STORE, 140351954001920, 140351954124799,
+STORE, 140351954124800, 140351954157567,
+STORE, 140351954157568, 140351954165759,
+STORE, 140351954165760, 140351954169855,
+STORE, 140728272429056, 140728272433151,
+STORE, 140728272416768, 140728272429055,
+STORE, 47280840998912, 47280841007103,
+STORE, 47280841007104, 47280841015295,
+STORE, 47280841015296, 47280841179135,
+ERASE, 47280841015296, 47280841015296,
+STORE, 47280841015296, 47280841027583,
+STORE, 47280841027584, 47280841179135,
+STORE, 47280841125888, 47280841179135,
+STORE, 47280841027584, 47280841125887,
+ERASE, 47280841027584, 47280841027584,
+STORE, 47280841027584, 47280841125887,
+STORE, 47280841170944, 47280841179135,
+STORE, 47280841125888, 47280841170943,
+ERASE, 47280841125888, 47280841125888,
+STORE, 47280841125888, 47280841179135,
+ERASE, 47280841125888, 47280841125888,
+STORE, 47280841125888, 47280841170943,
+STORE, 47280841170944, 47280841179135,
+ERASE, 47280841170944, 47280841170944,
+STORE, 47280841170944, 47280841179135,
+STORE, 47280841179136, 47280844230655,
+STORE, 47280841723904, 47280844230655,
+STORE, 47280841179136, 47280841723903,
+ERASE, 47280841723904, 47280841723904,
+STORE, 47280841723904, 47280844009471,
+STORE, 47280844009472, 47280844230655,
+STORE, 47280843419648, 47280844009471,
+STORE, 47280841723904, 47280843419647,
+ERASE, 47280841723904, 47280841723904,
+STORE, 47280841723904, 47280843419647,
+STORE, 47280844005376, 47280844009471,
+STORE, 47280843419648, 47280844005375,
+ERASE, 47280843419648, 47280843419648,
+STORE, 47280843419648, 47280844005375,
+STORE, 47280844214272, 47280844230655,
+STORE, 47280844009472, 47280844214271,
+ERASE, 47280844009472, 47280844009472,
+STORE, 47280844009472, 47280844214271,
+ERASE, 47280844214272, 47280844214272,
+STORE, 47280844214272, 47280844230655,
+STORE, 47280844230656, 47280846069759,
+STORE, 47280844369920, 47280846069759,
+STORE, 47280844230656, 47280844369919,
+ERASE, 47280844369920, 47280844369920,
+STORE, 47280844369920, 47280846028799,
+STORE, 47280846028800, 47280846069759,
+STORE, 47280845713408, 47280846028799,
+STORE, 47280844369920, 47280845713407,
+ERASE, 47280844369920, 47280844369920,
+STORE, 47280844369920, 47280845713407,
+STORE, 47280846024704, 47280846028799,
+STORE, 47280845713408, 47280846024703,
+ERASE, 47280845713408, 47280845713408,
+STORE, 47280845713408, 47280846024703,
+STORE, 47280846053376, 47280846069759,
+STORE, 47280846028800, 47280846053375,
+ERASE, 47280846028800, 47280846028800,
+STORE, 47280846028800, 47280846053375,
+ERASE, 47280846053376, 47280846053376,
+STORE, 47280846053376, 47280846069759,
+STORE, 47280846069760, 47280846204927,
+ERASE, 47280846069760, 47280846069760,
+STORE, 47280846069760, 47280846094335,
+STORE, 47280846094336, 47280846204927,
+STORE, 47280846155776, 47280846204927,
+STORE, 47280846094336, 47280846155775,
+ERASE, 47280846094336, 47280846094336,
+STORE, 47280846094336, 47280846155775,
+STORE, 47280846180352, 47280846204927,
+STORE, 47280846155776, 47280846180351,
+ERASE, 47280846155776, 47280846155776,
+STORE, 47280846155776, 47280846204927,
+ERASE, 47280846155776, 47280846155776,
+STORE, 47280846155776, 47280846180351,
+STORE, 47280846180352, 47280846204927,
+STORE, 47280846188544, 47280846204927,
+STORE, 47280846180352, 47280846188543,
+ERASE, 47280846180352, 47280846180352,
+STORE, 47280846180352, 47280846188543,
+ERASE, 47280846188544, 47280846188544,
+STORE, 47280846188544, 47280846204927,
+STORE, 47280846204928, 47280846225407,
+ERASE, 47280846204928, 47280846204928,
+STORE, 47280846204928, 47280846209023,
+STORE, 47280846209024, 47280846225407,
+STORE, 47280846213120, 47280846225407,
+STORE, 47280846209024, 47280846213119,
+ERASE, 47280846209024, 47280846209024,
+STORE, 47280846209024, 47280846213119,
+STORE, 47280846217216, 47280846225407,
+STORE, 47280846213120, 47280846217215,
+ERASE, 47280846213120, 47280846213120,
+STORE, 47280846213120, 47280846225407,
+ERASE, 47280846213120, 47280846213120,
+STORE, 47280846213120, 47280846217215,
+STORE, 47280846217216, 47280846225407,
+ERASE, 47280846217216, 47280846217216,
+STORE, 47280846217216, 47280846225407,
+STORE, 47280846225408, 47280846233599,
+ERASE, 47280846028800, 47280846028800,
+STORE, 47280846028800, 47280846045183,
+STORE, 47280846045184, 47280846053375,
+ERASE, 47280846217216, 47280846217216,
+STORE, 47280846217216, 47280846221311,
+STORE, 47280846221312, 47280846225407,
+ERASE, 47280846180352, 47280846180352,
+STORE, 47280846180352, 47280846184447,
+STORE, 47280846184448, 47280846188543,
+ERASE, 47280844009472, 47280844009472,
+STORE, 47280844009472, 47280844206079,
+STORE, 47280844206080, 47280844214271,
+ERASE, 47280841170944, 47280841170944,
+STORE, 47280841170944, 47280841175039,
+STORE, 47280841175040, 47280841179135,
+ERASE, 94410362142720, 94410362142720,
+STORE, 94410362142720, 94410362150911,
+STORE, 94410362150912, 94410362155007,
+ERASE, 140351954157568, 140351954157568,
+STORE, 140351954157568, 140351954161663,
+STORE, 140351954161664, 140351954165759,
+ERASE, 47280840998912, 47280840998912,
+STORE, 94410379456512, 94410379591679,
+STORE, 140737488347136, 140737488351231,
+STORE, 140732946362368, 140737488351231,
+ERASE, 140732946362368, 140732946362368,
+STORE, 140732946362368, 140732946366463,
+STORE, 94352937934848, 94352938106879,
+ERASE, 94352937934848, 94352937934848,
+STORE, 94352937934848, 94352937951231,
+STORE, 94352937951232, 94352938106879,
+ERASE, 94352937951232, 94352937951232,
+STORE, 94352937951232, 94352938053631,
+STORE, 94352938053632, 94352938094591,
+STORE, 94352938094592, 94352938106879,
+STORE, 140595518742528, 140595518914559,
+ERASE, 140595518742528, 140595518742528,
+STORE, 140595518742528, 140595518746623,
+STORE, 140595518746624, 140595518914559,
+ERASE, 140595518746624, 140595518746624,
+STORE, 140595518746624, 140595518869503,
+STORE, 140595518869504, 140595518902271,
+STORE, 140595518902272, 140595518910463,
+STORE, 140595518910464, 140595518914559,
+STORE, 140732947468288, 140732947472383,
+STORE, 140732947456000, 140732947468287,
+STORE, 47037276254208, 47037276262399,
+STORE, 47037276262400, 47037276270591,
+STORE, 47037276270592, 47037276434431,
+ERASE, 47037276270592, 47037276270592,
+STORE, 47037276270592, 47037276282879,
+STORE, 47037276282880, 47037276434431,
+STORE, 47037276381184, 47037276434431,
+STORE, 47037276282880, 47037276381183,
+ERASE, 47037276282880, 47037276282880,
+STORE, 47037276282880, 47037276381183,
+STORE, 47037276426240, 47037276434431,
+STORE, 47037276381184, 47037276426239,
+ERASE, 47037276381184, 47037276381184,
+STORE, 47037276381184, 47037276434431,
+ERASE, 47037276381184, 47037276381184,
+STORE, 47037276381184, 47037276426239,
+STORE, 47037276426240, 47037276434431,
+ERASE, 47037276426240, 47037276426240,
+STORE, 47037276426240, 47037276434431,
+STORE, 47037276434432, 47037279485951,
+STORE, 47037276979200, 47037279485951,
+STORE, 47037276434432, 47037276979199,
+ERASE, 47037276979200, 47037276979200,
+STORE, 47037276979200, 47037279264767,
+STORE, 47037279264768, 47037279485951,
+STORE, 47037278674944, 47037279264767,
+STORE, 47037276979200, 47037278674943,
+ERASE, 47037276979200, 47037276979200,
+STORE, 47037276979200, 47037278674943,
+STORE, 47037279260672, 47037279264767,
+STORE, 47037278674944, 47037279260671,
+ERASE, 47037278674944, 47037278674944,
+STORE, 47037278674944, 47037279260671,
+STORE, 47037279469568, 47037279485951,
+STORE, 47037279264768, 47037279469567,
+ERASE, 47037279264768, 47037279264768,
+STORE, 47037279264768, 47037279469567,
+ERASE, 47037279469568, 47037279469568,
+STORE, 47037279469568, 47037279485951,
+STORE, 47037279485952, 47037281325055,
+STORE, 47037279625216, 47037281325055,
+STORE, 47037279485952, 47037279625215,
+ERASE, 47037279625216, 47037279625216,
+STORE, 47037279625216, 47037281284095,
+STORE, 47037281284096, 47037281325055,
+STORE, 47037280968704, 47037281284095,
+STORE, 47037279625216, 47037280968703,
+ERASE, 47037279625216, 47037279625216,
+STORE, 47037279625216, 47037280968703,
+STORE, 47037281280000, 47037281284095,
+STORE, 47037280968704, 47037281279999,
+ERASE, 47037280968704, 47037280968704,
+STORE, 47037280968704, 47037281279999,
+STORE, 47037281308672, 47037281325055,
+STORE, 47037281284096, 47037281308671,
+ERASE, 47037281284096, 47037281284096,
+STORE, 47037281284096, 47037281308671,
+ERASE, 47037281308672, 47037281308672,
+STORE, 47037281308672, 47037281325055,
+STORE, 47037281325056, 47037281460223,
+ERASE, 47037281325056, 47037281325056,
+STORE, 47037281325056, 47037281349631,
+STORE, 47037281349632, 47037281460223,
+STORE, 47037281411072, 47037281460223,
+STORE, 47037281349632, 47037281411071,
+ERASE, 47037281349632, 47037281349632,
+STORE, 47037281349632, 47037281411071,
+STORE, 47037281435648, 47037281460223,
+STORE, 47037281411072, 47037281435647,
+ERASE, 47037281411072, 47037281411072,
+STORE, 47037281411072, 47037281460223,
+ERASE, 47037281411072, 47037281411072,
+STORE, 47037281411072, 47037281435647,
+STORE, 47037281435648, 47037281460223,
+STORE, 47037281443840, 47037281460223,
+STORE, 47037281435648, 47037281443839,
+ERASE, 47037281435648, 47037281435648,
+STORE, 47037281435648, 47037281443839,
+ERASE, 47037281443840, 47037281443840,
+STORE, 47037281443840, 47037281460223,
+STORE, 47037281460224, 47037281480703,
+ERASE, 47037281460224, 47037281460224,
+STORE, 47037281460224, 47037281464319,
+STORE, 47037281464320, 47037281480703,
+STORE, 47037281468416, 47037281480703,
+STORE, 47037281464320, 47037281468415,
+ERASE, 47037281464320, 47037281464320,
+STORE, 47037281464320, 47037281468415,
+STORE, 47037281472512, 47037281480703,
+STORE, 47037281468416, 47037281472511,
+ERASE, 47037281468416, 47037281468416,
+STORE, 47037281468416, 47037281480703,
+ERASE, 47037281468416, 47037281468416,
+STORE, 47037281468416, 47037281472511,
+STORE, 47037281472512, 47037281480703,
+ERASE, 47037281472512, 47037281472512,
+STORE, 47037281472512, 47037281480703,
+STORE, 47037281480704, 47037281488895,
+ERASE, 47037281284096, 47037281284096,
+STORE, 47037281284096, 47037281300479,
+STORE, 47037281300480, 47037281308671,
+ERASE, 47037281472512, 47037281472512,
+STORE, 47037281472512, 47037281476607,
+STORE, 47037281476608, 47037281480703,
+ERASE, 47037281435648, 47037281435648,
+STORE, 47037281435648, 47037281439743,
+STORE, 47037281439744, 47037281443839,
+ERASE, 47037279264768, 47037279264768,
+STORE, 47037279264768, 47037279461375,
+STORE, 47037279461376, 47037279469567,
+ERASE, 47037276426240, 47037276426240,
+STORE, 47037276426240, 47037276430335,
+STORE, 47037276430336, 47037276434431,
+ERASE, 94352938094592, 94352938094592,
+STORE, 94352938094592, 94352938102783,
+STORE, 94352938102784, 94352938106879,
+ERASE, 140595518902272, 140595518902272,
+STORE, 140595518902272, 140595518906367,
+STORE, 140595518906368, 140595518910463,
+ERASE, 47037276254208, 47037276254208,
+STORE, 94352938438656, 94352938573823,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733506027520, 140737488351231,
+ERASE, 140733506027520, 140733506027520,
+STORE, 140733506027520, 140733506031615,
+STORE, 94150123073536, 94150123245567,
+ERASE, 94150123073536, 94150123073536,
+STORE, 94150123073536, 94150123089919,
+STORE, 94150123089920, 94150123245567,
+ERASE, 94150123089920, 94150123089920,
+STORE, 94150123089920, 94150123192319,
+STORE, 94150123192320, 94150123233279,
+STORE, 94150123233280, 94150123245567,
+STORE, 140081290375168, 140081290547199,
+ERASE, 140081290375168, 140081290375168,
+STORE, 140081290375168, 140081290379263,
+STORE, 140081290379264, 140081290547199,
+ERASE, 140081290379264, 140081290379264,
+STORE, 140081290379264, 140081290502143,
+STORE, 140081290502144, 140081290534911,
+STORE, 140081290534912, 140081290543103,
+STORE, 140081290543104, 140081290547199,
+STORE, 140733506707456, 140733506711551,
+STORE, 140733506695168, 140733506707455,
+STORE, 47551504621568, 47551504629759,
+STORE, 47551504629760, 47551504637951,
+STORE, 47551504637952, 47551504801791,
+ERASE, 47551504637952, 47551504637952,
+STORE, 47551504637952, 47551504650239,
+STORE, 47551504650240, 47551504801791,
+STORE, 47551504748544, 47551504801791,
+STORE, 47551504650240, 47551504748543,
+ERASE, 47551504650240, 47551504650240,
+STORE, 47551504650240, 47551504748543,
+STORE, 47551504793600, 47551504801791,
+STORE, 47551504748544, 47551504793599,
+ERASE, 47551504748544, 47551504748544,
+STORE, 47551504748544, 47551504801791,
+ERASE, 47551504748544, 47551504748544,
+STORE, 47551504748544, 47551504793599,
+STORE, 47551504793600, 47551504801791,
+ERASE, 47551504793600, 47551504793600,
+STORE, 47551504793600, 47551504801791,
+STORE, 47551504801792, 47551507853311,
+STORE, 47551505346560, 47551507853311,
+STORE, 47551504801792, 47551505346559,
+ERASE, 47551505346560, 47551505346560,
+STORE, 47551505346560, 47551507632127,
+STORE, 47551507632128, 47551507853311,
+STORE, 47551507042304, 47551507632127,
+STORE, 47551505346560, 47551507042303,
+ERASE, 47551505346560, 47551505346560,
+STORE, 47551505346560, 47551507042303,
+STORE, 47551507628032, 47551507632127,
+STORE, 47551507042304, 47551507628031,
+ERASE, 47551507042304, 47551507042304,
+STORE, 47551507042304, 47551507628031,
+STORE, 47551507836928, 47551507853311,
+STORE, 47551507632128, 47551507836927,
+ERASE, 47551507632128, 47551507632128,
+STORE, 47551507632128, 47551507836927,
+ERASE, 47551507836928, 47551507836928,
+STORE, 47551507836928, 47551507853311,
+STORE, 47551507853312, 47551509692415,
+STORE, 47551507992576, 47551509692415,
+STORE, 47551507853312, 47551507992575,
+ERASE, 47551507992576, 47551507992576,
+STORE, 47551507992576, 47551509651455,
+STORE, 47551509651456, 47551509692415,
+STORE, 47551509336064, 47551509651455,
+STORE, 47551507992576, 47551509336063,
+ERASE, 47551507992576, 47551507992576,
+STORE, 47551507992576, 47551509336063,
+STORE, 47551509647360, 47551509651455,
+STORE, 47551509336064, 47551509647359,
+ERASE, 47551509336064, 47551509336064,
+STORE, 47551509336064, 47551509647359,
+STORE, 47551509676032, 47551509692415,
+STORE, 47551509651456, 47551509676031,
+ERASE, 47551509651456, 47551509651456,
+STORE, 47551509651456, 47551509676031,
+ERASE, 47551509676032, 47551509676032,
+STORE, 47551509676032, 47551509692415,
+STORE, 47551509692416, 47551509827583,
+ERASE, 47551509692416, 47551509692416,
+STORE, 47551509692416, 47551509716991,
+STORE, 47551509716992, 47551509827583,
+STORE, 47551509778432, 47551509827583,
+STORE, 47551509716992, 47551509778431,
+ERASE, 47551509716992, 47551509716992,
+STORE, 47551509716992, 47551509778431,
+STORE, 47551509803008, 47551509827583,
+STORE, 47551509778432, 47551509803007,
+ERASE, 47551509778432, 47551509778432,
+STORE, 47551509778432, 47551509827583,
+ERASE, 47551509778432, 47551509778432,
+STORE, 47551509778432, 47551509803007,
+STORE, 47551509803008, 47551509827583,
+STORE, 47551509811200, 47551509827583,
+STORE, 47551509803008, 47551509811199,
+ERASE, 47551509803008, 47551509803008,
+STORE, 47551509803008, 47551509811199,
+ERASE, 47551509811200, 47551509811200,
+STORE, 47551509811200, 47551509827583,
+STORE, 47551509827584, 47551509848063,
+ERASE, 47551509827584, 47551509827584,
+STORE, 47551509827584, 47551509831679,
+STORE, 47551509831680, 47551509848063,
+STORE, 47551509835776, 47551509848063,
+STORE, 47551509831680, 47551509835775,
+ERASE, 47551509831680, 47551509831680,
+STORE, 47551509831680, 47551509835775,
+STORE, 47551509839872, 47551509848063,
+STORE, 47551509835776, 47551509839871,
+ERASE, 47551509835776, 47551509835776,
+STORE, 47551509835776, 47551509848063,
+ERASE, 47551509835776, 47551509835776,
+STORE, 47551509835776, 47551509839871,
+STORE, 47551509839872, 47551509848063,
+ERASE, 47551509839872, 47551509839872,
+STORE, 47551509839872, 47551509848063,
+STORE, 47551509848064, 47551509856255,
+ERASE, 47551509651456, 47551509651456,
+STORE, 47551509651456, 47551509667839,
+STORE, 47551509667840, 47551509676031,
+ERASE, 47551509839872, 47551509839872,
+STORE, 47551509839872, 47551509843967,
+STORE, 47551509843968, 47551509848063,
+ERASE, 47551509803008, 47551509803008,
+STORE, 47551509803008, 47551509807103,
+STORE, 47551509807104, 47551509811199,
+ERASE, 47551507632128, 47551507632128,
+STORE, 47551507632128, 47551507828735,
+STORE, 47551507828736, 47551507836927,
+ERASE, 47551504793600, 47551504793600,
+STORE, 47551504793600, 47551504797695,
+STORE, 47551504797696, 47551504801791,
+ERASE, 94150123233280, 94150123233280,
+STORE, 94150123233280, 94150123241471,
+STORE, 94150123241472, 94150123245567,
+ERASE, 140081290534912, 140081290534912,
+STORE, 140081290534912, 140081290539007,
+STORE, 140081290539008, 140081290543103,
+ERASE, 47551504621568, 47551504621568,
+STORE, 94150148112384, 94150148247551,
+STORE, 140737488347136, 140737488351231,
+STORE, 140734389334016, 140737488351231,
+ERASE, 140734389334016, 140734389334016,
+STORE, 140734389334016, 140734389338111,
+STORE, 94844636606464, 94844636778495,
+ERASE, 94844636606464, 94844636606464,
+STORE, 94844636606464, 94844636622847,
+STORE, 94844636622848, 94844636778495,
+ERASE, 94844636622848, 94844636622848,
+STORE, 94844636622848, 94844636725247,
+STORE, 94844636725248, 94844636766207,
+STORE, 94844636766208, 94844636778495,
+STORE, 139922765217792, 139922765389823,
+ERASE, 139922765217792, 139922765217792,
+STORE, 139922765217792, 139922765221887,
+STORE, 139922765221888, 139922765389823,
+ERASE, 139922765221888, 139922765221888,
+STORE, 139922765221888, 139922765344767,
+STORE, 139922765344768, 139922765377535,
+STORE, 139922765377536, 139922765385727,
+STORE, 139922765385728, 139922765389823,
+STORE, 140734389678080, 140734389682175,
+STORE, 140734389665792, 140734389678079,
+STORE, 47710029778944, 47710029787135,
+STORE, 47710029787136, 47710029795327,
+STORE, 47710029795328, 47710029959167,
+ERASE, 47710029795328, 47710029795328,
+STORE, 47710029795328, 47710029807615,
+STORE, 47710029807616, 47710029959167,
+STORE, 47710029905920, 47710029959167,
+STORE, 47710029807616, 47710029905919,
+ERASE, 47710029807616, 47710029807616,
+STORE, 47710029807616, 47710029905919,
+STORE, 47710029950976, 47710029959167,
+STORE, 47710029905920, 47710029950975,
+ERASE, 47710029905920, 47710029905920,
+STORE, 47710029905920, 47710029959167,
+ERASE, 47710029905920, 47710029905920,
+STORE, 47710029905920, 47710029950975,
+STORE, 47710029950976, 47710029959167,
+ERASE, 47710029950976, 47710029950976,
+STORE, 47710029950976, 47710029959167,
+STORE, 47710029959168, 47710033010687,
+STORE, 47710030503936, 47710033010687,
+STORE, 47710029959168, 47710030503935,
+ERASE, 47710030503936, 47710030503936,
+STORE, 47710030503936, 47710032789503,
+STORE, 47710032789504, 47710033010687,
+STORE, 47710032199680, 47710032789503,
+STORE, 47710030503936, 47710032199679,
+ERASE, 47710030503936, 47710030503936,
+STORE, 47710030503936, 47710032199679,
+STORE, 47710032785408, 47710032789503,
+STORE, 47710032199680, 47710032785407,
+ERASE, 47710032199680, 47710032199680,
+STORE, 47710032199680, 47710032785407,
+STORE, 47710032994304, 47710033010687,
+STORE, 47710032789504, 47710032994303,
+ERASE, 47710032789504, 47710032789504,
+STORE, 47710032789504, 47710032994303,
+ERASE, 47710032994304, 47710032994304,
+STORE, 47710032994304, 47710033010687,
+STORE, 47710033010688, 47710034849791,
+STORE, 47710033149952, 47710034849791,
+STORE, 47710033010688, 47710033149951,
+ERASE, 47710033149952, 47710033149952,
+STORE, 47710033149952, 47710034808831,
+STORE, 47710034808832, 47710034849791,
+STORE, 47710034493440, 47710034808831,
+STORE, 47710033149952, 47710034493439,
+ERASE, 47710033149952, 47710033149952,
+STORE, 47710033149952, 47710034493439,
+STORE, 47710034804736, 47710034808831,
+STORE, 47710034493440, 47710034804735,
+ERASE, 47710034493440, 47710034493440,
+STORE, 47710034493440, 47710034804735,
+STORE, 47710034833408, 47710034849791,
+STORE, 47710034808832, 47710034833407,
+ERASE, 47710034808832, 47710034808832,
+STORE, 47710034808832, 47710034833407,
+ERASE, 47710034833408, 47710034833408,
+STORE, 47710034833408, 47710034849791,
+STORE, 47710034849792, 47710034984959,
+ERASE, 47710034849792, 47710034849792,
+STORE, 47710034849792, 47710034874367,
+STORE, 47710034874368, 47710034984959,
+STORE, 47710034935808, 47710034984959,
+STORE, 47710034874368, 47710034935807,
+ERASE, 47710034874368, 47710034874368,
+STORE, 47710034874368, 47710034935807,
+STORE, 47710034960384, 47710034984959,
+STORE, 47710034935808, 47710034960383,
+ERASE, 47710034935808, 47710034935808,
+STORE, 47710034935808, 47710034984959,
+ERASE, 47710034935808, 47710034935808,
+STORE, 47710034935808, 47710034960383,
+STORE, 47710034960384, 47710034984959,
+STORE, 47710034968576, 47710034984959,
+STORE, 47710034960384, 47710034968575,
+ERASE, 47710034960384, 47710034960384,
+STORE, 47710034960384, 47710034968575,
+ERASE, 47710034968576, 47710034968576,
+STORE, 47710034968576, 47710034984959,
+STORE, 47710034984960, 47710035005439,
+ERASE, 47710034984960, 47710034984960,
+STORE, 47710034984960, 47710034989055,
+STORE, 47710034989056, 47710035005439,
+STORE, 47710034993152, 47710035005439,
+STORE, 47710034989056, 47710034993151,
+ERASE, 47710034989056, 47710034989056,
+STORE, 47710034989056, 47710034993151,
+STORE, 47710034997248, 47710035005439,
+STORE, 47710034993152, 47710034997247,
+ERASE, 47710034993152, 47710034993152,
+STORE, 47710034993152, 47710035005439,
+ERASE, 47710034993152, 47710034993152,
+STORE, 47710034993152, 47710034997247,
+STORE, 47710034997248, 47710035005439,
+ERASE, 47710034997248, 47710034997248,
+STORE, 47710034997248, 47710035005439,
+STORE, 47710035005440, 47710035013631,
+ERASE, 47710034808832, 47710034808832,
+STORE, 47710034808832, 47710034825215,
+STORE, 47710034825216, 47710034833407,
+ERASE, 47710034997248, 47710034997248,
+STORE, 47710034997248, 47710035001343,
+STORE, 47710035001344, 47710035005439,
+ERASE, 47710034960384, 47710034960384,
+STORE, 47710034960384, 47710034964479,
+STORE, 47710034964480, 47710034968575,
+ERASE, 47710032789504, 47710032789504,
+STORE, 47710032789504, 47710032986111,
+STORE, 47710032986112, 47710032994303,
+ERASE, 47710029950976, 47710029950976,
+STORE, 47710029950976, 47710029955071,
+STORE, 47710029955072, 47710029959167,
+ERASE, 94844636766208, 94844636766208,
+STORE, 94844636766208, 94844636774399,
+STORE, 94844636774400, 94844636778495,
+ERASE, 139922765377536, 139922765377536,
+STORE, 139922765377536, 139922765381631,
+STORE, 139922765381632, 139922765385727,
+ERASE, 47710029778944, 47710029778944,
+STORE, 94844641775616, 94844641910783,
+STORE, 140737488347136, 140737488351231,
+STORE, 140732213886976, 140737488351231,
+ERASE, 140732213886976, 140732213886976,
+STORE, 140732213886976, 140732213891071,
+STORE, 94240508887040, 94240509059071,
+ERASE, 94240508887040, 94240508887040,
+STORE, 94240508887040, 94240508903423,
+STORE, 94240508903424, 94240509059071,
+ERASE, 94240508903424, 94240508903424,
+STORE, 94240508903424, 94240509005823,
+STORE, 94240509005824, 94240509046783,
+STORE, 94240509046784, 94240509059071,
+STORE, 140275106516992, 140275106689023,
+ERASE, 140275106516992, 140275106516992,
+STORE, 140275106516992, 140275106521087,
+STORE, 140275106521088, 140275106689023,
+ERASE, 140275106521088, 140275106521088,
+STORE, 140275106521088, 140275106643967,
+STORE, 140275106643968, 140275106676735,
+STORE, 140275106676736, 140275106684927,
+STORE, 140275106684928, 140275106689023,
+STORE, 140732213977088, 140732213981183,
+STORE, 140732213964800, 140732213977087,
+STORE, 47357688479744, 47357688487935,
+STORE, 47357688487936, 47357688496127,
+STORE, 47357688496128, 47357688659967,
+ERASE, 47357688496128, 47357688496128,
+STORE, 47357688496128, 47357688508415,
+STORE, 47357688508416, 47357688659967,
+STORE, 47357688606720, 47357688659967,
+STORE, 47357688508416, 47357688606719,
+ERASE, 47357688508416, 47357688508416,
+STORE, 47357688508416, 47357688606719,
+STORE, 47357688651776, 47357688659967,
+STORE, 47357688606720, 47357688651775,
+ERASE, 47357688606720, 47357688606720,
+STORE, 47357688606720, 47357688659967,
+ERASE, 47357688606720, 47357688606720,
+STORE, 47357688606720, 47357688651775,
+STORE, 47357688651776, 47357688659967,
+ERASE, 47357688651776, 47357688651776,
+STORE, 47357688651776, 47357688659967,
+STORE, 47357688659968, 47357691711487,
+STORE, 47357689204736, 47357691711487,
+STORE, 47357688659968, 47357689204735,
+ERASE, 47357689204736, 47357689204736,
+STORE, 47357689204736, 47357691490303,
+STORE, 47357691490304, 47357691711487,
+STORE, 47357690900480, 47357691490303,
+STORE, 47357689204736, 47357690900479,
+ERASE, 47357689204736, 47357689204736,
+STORE, 47357689204736, 47357690900479,
+STORE, 47357691486208, 47357691490303,
+STORE, 47357690900480, 47357691486207,
+ERASE, 47357690900480, 47357690900480,
+STORE, 47357690900480, 47357691486207,
+STORE, 47357691695104, 47357691711487,
+STORE, 47357691490304, 47357691695103,
+ERASE, 47357691490304, 47357691490304,
+STORE, 47357691490304, 47357691695103,
+ERASE, 47357691695104, 47357691695104,
+STORE, 47357691695104, 47357691711487,
+STORE, 47357691711488, 47357693550591,
+STORE, 47357691850752, 47357693550591,
+STORE, 47357691711488, 47357691850751,
+ERASE, 47357691850752, 47357691850752,
+STORE, 47357691850752, 47357693509631,
+STORE, 47357693509632, 47357693550591,
+STORE, 47357693194240, 47357693509631,
+STORE, 47357691850752, 47357693194239,
+ERASE, 47357691850752, 47357691850752,
+STORE, 47357691850752, 47357693194239,
+STORE, 47357693505536, 47357693509631,
+STORE, 47357693194240, 47357693505535,
+ERASE, 47357693194240, 47357693194240,
+STORE, 47357693194240, 47357693505535,
+STORE, 47357693534208, 47357693550591,
+STORE, 47357693509632, 47357693534207,
+ERASE, 47357693509632, 47357693509632,
+STORE, 47357693509632, 47357693534207,
+ERASE, 47357693534208, 47357693534208,
+STORE, 47357693534208, 47357693550591,
+STORE, 47357693550592, 47357693685759,
+ERASE, 47357693550592, 47357693550592,
+STORE, 47357693550592, 47357693575167,
+STORE, 47357693575168, 47357693685759,
+STORE, 47357693636608, 47357693685759,
+STORE, 47357693575168, 47357693636607,
+ERASE, 47357693575168, 47357693575168,
+STORE, 47357693575168, 47357693636607,
+STORE, 47357693661184, 47357693685759,
+STORE, 47357693636608, 47357693661183,
+ERASE, 47357693636608, 47357693636608,
+STORE, 47357693636608, 47357693685759,
+ERASE, 47357693636608, 47357693636608,
+STORE, 47357693636608, 47357693661183,
+STORE, 47357693661184, 47357693685759,
+STORE, 47357693669376, 47357693685759,
+STORE, 47357693661184, 47357693669375,
+ERASE, 47357693661184, 47357693661184,
+STORE, 47357693661184, 47357693669375,
+ERASE, 47357693669376, 47357693669376,
+STORE, 47357693669376, 47357693685759,
+STORE, 47357693685760, 47357693706239,
+ERASE, 47357693685760, 47357693685760,
+STORE, 47357693685760, 47357693689855,
+STORE, 47357693689856, 47357693706239,
+STORE, 47357693693952, 47357693706239,
+STORE, 47357693689856, 47357693693951,
+ERASE, 47357693689856, 47357693689856,
+STORE, 47357693689856, 47357693693951,
+STORE, 47357693698048, 47357693706239,
+STORE, 47357693693952, 47357693698047,
+ERASE, 47357693693952, 47357693693952,
+STORE, 47357693693952, 47357693706239,
+ERASE, 47357693693952, 47357693693952,
+STORE, 47357693693952, 47357693698047,
+STORE, 47357693698048, 47357693706239,
+ERASE, 47357693698048, 47357693698048,
+STORE, 47357693698048, 47357693706239,
+STORE, 47357693706240, 47357693714431,
+ERASE, 47357693509632, 47357693509632,
+STORE, 47357693509632, 47357693526015,
+STORE, 47357693526016, 47357693534207,
+ERASE, 47357693698048, 47357693698048,
+STORE, 47357693698048, 47357693702143,
+STORE, 47357693702144, 47357693706239,
+ERASE, 47357693661184, 47357693661184,
+STORE, 47357693661184, 47357693665279,
+STORE, 47357693665280, 47357693669375,
+ERASE, 47357691490304, 47357691490304,
+STORE, 47357691490304, 47357691686911,
+STORE, 47357691686912, 47357691695103,
+ERASE, 47357688651776, 47357688651776,
+STORE, 47357688651776, 47357688655871,
+STORE, 47357688655872, 47357688659967,
+ERASE, 94240509046784, 94240509046784,
+STORE, 94240509046784, 94240509054975,
+STORE, 94240509054976, 94240509059071,
+ERASE, 140275106676736, 140275106676736,
+STORE, 140275106676736, 140275106680831,
+STORE, 140275106680832, 140275106684927,
+ERASE, 47357688479744, 47357688479744,
+STORE, 94240518361088, 94240518496255,
+STORE, 140737488347136, 140737488351231,
+STORE, 140732688277504, 140737488351231,
+ERASE, 140732688277504, 140732688277504,
+STORE, 140732688277504, 140732688281599,
+STORE, 94629171351552, 94629172064255,
+ERASE, 94629171351552, 94629171351552,
+STORE, 94629171351552, 94629171400703,
+STORE, 94629171400704, 94629172064255,
+ERASE, 94629171400704, 94629171400704,
+STORE, 94629171400704, 94629171945471,
+STORE, 94629171945472, 94629172043775,
+STORE, 94629172043776, 94629172064255,
+STORE, 139770707644416, 139770707816447,
+ERASE, 139770707644416, 139770707644416,
+STORE, 139770707644416, 139770707648511,
+STORE, 139770707648512, 139770707816447,
+ERASE, 139770707648512, 139770707648512,
+STORE, 139770707648512, 139770707771391,
+STORE, 139770707771392, 139770707804159,
+STORE, 139770707804160, 139770707812351,
+STORE, 139770707812352, 139770707816447,
+STORE, 140732689121280, 140732689125375,
+STORE, 140732689108992, 140732689121279,
+STORE, 47862087352320, 47862087360511,
+STORE, 47862087360512, 47862087368703,
+STORE, 47862087368704, 47862087475199,
+STORE, 47862087385088, 47862087475199,
+STORE, 47862087368704, 47862087385087,
+ERASE, 47862087385088, 47862087385088,
+STORE, 47862087385088, 47862087458815,
+STORE, 47862087458816, 47862087475199,
+STORE, 47862087438336, 47862087458815,
+STORE, 47862087385088, 47862087438335,
+ERASE, 47862087385088, 47862087385088,
+STORE, 47862087385088, 47862087438335,
+STORE, 47862087454720, 47862087458815,
+STORE, 47862087438336, 47862087454719,
+ERASE, 47862087438336, 47862087438336,
+STORE, 47862087438336, 47862087454719,
+STORE, 47862087467008, 47862087475199,
+STORE, 47862087458816, 47862087467007,
+ERASE, 47862087458816, 47862087458816,
+STORE, 47862087458816, 47862087467007,
+ERASE, 47862087467008, 47862087467008,
+STORE, 47862087467008, 47862087475199,
+STORE, 47862087475200, 47862089314303,
+STORE, 47862087614464, 47862089314303,
+STORE, 47862087475200, 47862087614463,
+ERASE, 47862087614464, 47862087614464,
+STORE, 47862087614464, 47862089273343,
+STORE, 47862089273344, 47862089314303,
+STORE, 47862088957952, 47862089273343,
+STORE, 47862087614464, 47862088957951,
+ERASE, 47862087614464, 47862087614464,
+STORE, 47862087614464, 47862088957951,
+STORE, 47862089269248, 47862089273343,
+STORE, 47862088957952, 47862089269247,
+ERASE, 47862088957952, 47862088957952,
+STORE, 47862088957952, 47862089269247,
+STORE, 47862089297920, 47862089314303,
+STORE, 47862089273344, 47862089297919,
+ERASE, 47862089273344, 47862089273344,
+STORE, 47862089273344, 47862089297919,
+ERASE, 47862089297920, 47862089297920,
+STORE, 47862089297920, 47862089314303,
+STORE, 47862089297920, 47862089326591,
+ERASE, 47862089273344, 47862089273344,
+STORE, 47862089273344, 47862089289727,
+STORE, 47862089289728, 47862089297919,
+ERASE, 47862087458816, 47862087458816,
+STORE, 47862087458816, 47862087462911,
+STORE, 47862087462912, 47862087467007,
+ERASE, 94629172043776, 94629172043776,
+STORE, 94629172043776, 94629172060159,
+STORE, 94629172060160, 94629172064255,
+ERASE, 139770707804160, 139770707804160,
+STORE, 139770707804160, 139770707808255,
+STORE, 139770707808256, 139770707812351,
+ERASE, 47862087352320, 47862087352320,
+STORE, 94629197533184, 94629197668351,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727540711424, 140737488351231,
+ERASE, 140727540711424, 140727540711424,
+STORE, 140727540711424, 140727540715519,
+STORE, 94299865313280, 94299866025983,
+ERASE, 94299865313280, 94299865313280,
+STORE, 94299865313280, 94299865362431,
+STORE, 94299865362432, 94299866025983,
+ERASE, 94299865362432, 94299865362432,
+STORE, 94299865362432, 94299865907199,
+STORE, 94299865907200, 94299866005503,
+STORE, 94299866005504, 94299866025983,
+STORE, 140680268763136, 140680268935167,
+ERASE, 140680268763136, 140680268763136,
+STORE, 140680268763136, 140680268767231,
+STORE, 140680268767232, 140680268935167,
+ERASE, 140680268767232, 140680268767232,
+STORE, 140680268767232, 140680268890111,
+STORE, 140680268890112, 140680268922879,
+STORE, 140680268922880, 140680268931071,
+STORE, 140680268931072, 140680268935167,
+STORE, 140727541424128, 140727541428223,
+STORE, 140727541411840, 140727541424127,
+STORE, 46952526233600, 46952526241791,
+STORE, 46952526241792, 46952526249983,
+STORE, 46952526249984, 46952526356479,
+STORE, 46952526266368, 46952526356479,
+STORE, 46952526249984, 46952526266367,
+ERASE, 46952526266368, 46952526266368,
+STORE, 46952526266368, 46952526340095,
+STORE, 46952526340096, 46952526356479,
+STORE, 46952526319616, 46952526340095,
+STORE, 46952526266368, 46952526319615,
+ERASE, 46952526266368, 46952526266368,
+STORE, 46952526266368, 46952526319615,
+STORE, 46952526336000, 46952526340095,
+STORE, 46952526319616, 46952526335999,
+ERASE, 46952526319616, 46952526319616,
+STORE, 46952526319616, 46952526335999,
+STORE, 46952526348288, 46952526356479,
+STORE, 46952526340096, 46952526348287,
+ERASE, 46952526340096, 46952526340096,
+STORE, 46952526340096, 46952526348287,
+ERASE, 46952526348288, 46952526348288,
+STORE, 46952526348288, 46952526356479,
+STORE, 46952526356480, 46952528195583,
+STORE, 46952526495744, 46952528195583,
+STORE, 46952526356480, 46952526495743,
+ERASE, 46952526495744, 46952526495744,
+STORE, 46952526495744, 46952528154623,
+STORE, 46952528154624, 46952528195583,
+STORE, 46952527839232, 46952528154623,
+STORE, 46952526495744, 46952527839231,
+ERASE, 46952526495744, 46952526495744,
+STORE, 46952526495744, 46952527839231,
+STORE, 46952528150528, 46952528154623,
+STORE, 46952527839232, 46952528150527,
+ERASE, 46952527839232, 46952527839232,
+STORE, 46952527839232, 46952528150527,
+STORE, 46952528179200, 46952528195583,
+STORE, 46952528154624, 46952528179199,
+ERASE, 46952528154624, 46952528154624,
+STORE, 46952528154624, 46952528179199,
+ERASE, 46952528179200, 46952528179200,
+STORE, 46952528179200, 46952528195583,
+STORE, 46952528179200, 46952528207871,
+ERASE, 46952528154624, 46952528154624,
+STORE, 46952528154624, 46952528171007,
+STORE, 46952528171008, 46952528179199,
+ERASE, 46952526340096, 46952526340096,
+STORE, 46952526340096, 46952526344191,
+STORE, 46952526344192, 46952526348287,
+ERASE, 94299866005504, 94299866005504,
+STORE, 94299866005504, 94299866021887,
+STORE, 94299866021888, 94299866025983,
+ERASE, 140680268922880, 140680268922880,
+STORE, 140680268922880, 140680268926975,
+STORE, 140680268926976, 140680268931071,
+ERASE, 46952526233600, 46952526233600,
+STORE, 140737488347136, 140737488351231,
+STORE, 140722874793984, 140737488351231,
+ERASE, 140722874793984, 140722874793984,
+STORE, 140722874793984, 140722874798079,
+STORE, 94448916213760, 94448916926463,
+ERASE, 94448916213760, 94448916213760,
+STORE, 94448916213760, 94448916262911,
+STORE, 94448916262912, 94448916926463,
+ERASE, 94448916262912, 94448916262912,
+STORE, 94448916262912, 94448916807679,
+STORE, 94448916807680, 94448916905983,
+STORE, 94448916905984, 94448916926463,
+STORE, 140389117046784, 140389117218815,
+ERASE, 140389117046784, 140389117046784,
+STORE, 140389117046784, 140389117050879,
+STORE, 140389117050880, 140389117218815,
+ERASE, 140389117050880, 140389117050880,
+STORE, 140389117050880, 140389117173759,
+STORE, 140389117173760, 140389117206527,
+STORE, 140389117206528, 140389117214719,
+STORE, 140389117214720, 140389117218815,
+STORE, 140722875297792, 140722875301887,
+STORE, 140722875285504, 140722875297791,
+STORE, 47243677949952, 47243677958143,
+STORE, 47243677958144, 47243677966335,
+STORE, 47243677966336, 47243678072831,
+STORE, 47243677982720, 47243678072831,
+STORE, 47243677966336, 47243677982719,
+ERASE, 47243677982720, 47243677982720,
+STORE, 47243677982720, 47243678056447,
+STORE, 47243678056448, 47243678072831,
+STORE, 47243678035968, 47243678056447,
+STORE, 47243677982720, 47243678035967,
+ERASE, 47243677982720, 47243677982720,
+STORE, 47243677982720, 47243678035967,
+STORE, 47243678052352, 47243678056447,
+STORE, 47243678035968, 47243678052351,
+ERASE, 47243678035968, 47243678035968,
+STORE, 47243678035968, 47243678052351,
+STORE, 47243678064640, 47243678072831,
+STORE, 47243678056448, 47243678064639,
+ERASE, 47243678056448, 47243678056448,
+STORE, 47243678056448, 47243678064639,
+ERASE, 47243678064640, 47243678064640,
+STORE, 47243678064640, 47243678072831,
+STORE, 47243678072832, 47243679911935,
+STORE, 47243678212096, 47243679911935,
+STORE, 47243678072832, 47243678212095,
+ERASE, 47243678212096, 47243678212096,
+STORE, 47243678212096, 47243679870975,
+STORE, 47243679870976, 47243679911935,
+STORE, 47243679555584, 47243679870975,
+STORE, 47243678212096, 47243679555583,
+ERASE, 47243678212096, 47243678212096,
+STORE, 47243678212096, 47243679555583,
+STORE, 47243679866880, 47243679870975,
+STORE, 47243679555584, 47243679866879,
+ERASE, 47243679555584, 47243679555584,
+STORE, 47243679555584, 47243679866879,
+STORE, 47243679895552, 47243679911935,
+STORE, 47243679870976, 47243679895551,
+ERASE, 47243679870976, 47243679870976,
+STORE, 47243679870976, 47243679895551,
+ERASE, 47243679895552, 47243679895552,
+STORE, 47243679895552, 47243679911935,
+STORE, 47243679895552, 47243679924223,
+ERASE, 47243679870976, 47243679870976,
+STORE, 47243679870976, 47243679887359,
+STORE, 47243679887360, 47243679895551,
+ERASE, 47243678056448, 47243678056448,
+STORE, 47243678056448, 47243678060543,
+STORE, 47243678060544, 47243678064639,
+ERASE, 94448916905984, 94448916905984,
+STORE, 94448916905984, 94448916922367,
+STORE, 94448916922368, 94448916926463,
+ERASE, 140389117206528, 140389117206528,
+STORE, 140389117206528, 140389117210623,
+STORE, 140389117210624, 140389117214719,
+ERASE, 47243677949952, 47243677949952,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733068505088, 140737488351231,
+ERASE, 140733068505088, 140733068505088,
+STORE, 140733068505088, 140733068509183,
+STORE, 94207145750528, 94207146463231,
+ERASE, 94207145750528, 94207145750528,
+STORE, 94207145750528, 94207145799679,
+STORE, 94207145799680, 94207146463231,
+ERASE, 94207145799680, 94207145799680,
+STORE, 94207145799680, 94207146344447,
+STORE, 94207146344448, 94207146442751,
+STORE, 94207146442752, 94207146463231,
+STORE, 140684504911872, 140684505083903,
+ERASE, 140684504911872, 140684504911872,
+STORE, 140684504911872, 140684504915967,
+STORE, 140684504915968, 140684505083903,
+ERASE, 140684504915968, 140684504915968,
+STORE, 140684504915968, 140684505038847,
+STORE, 140684505038848, 140684505071615,
+STORE, 140684505071616, 140684505079807,
+STORE, 140684505079808, 140684505083903,
+STORE, 140733068607488, 140733068611583,
+STORE, 140733068595200, 140733068607487,
+STORE, 46948290084864, 46948290093055,
+STORE, 46948290093056, 46948290101247,
+STORE, 46948290101248, 46948290207743,
+STORE, 46948290117632, 46948290207743,
+STORE, 46948290101248, 46948290117631,
+ERASE, 46948290117632, 46948290117632,
+STORE, 46948290117632, 46948290191359,
+STORE, 46948290191360, 46948290207743,
+STORE, 46948290170880, 46948290191359,
+STORE, 46948290117632, 46948290170879,
+ERASE, 46948290117632, 46948290117632,
+STORE, 46948290117632, 46948290170879,
+STORE, 46948290187264, 46948290191359,
+STORE, 46948290170880, 46948290187263,
+ERASE, 46948290170880, 46948290170880,
+STORE, 46948290170880, 46948290187263,
+STORE, 46948290199552, 46948290207743,
+STORE, 46948290191360, 46948290199551,
+ERASE, 46948290191360, 46948290191360,
+STORE, 46948290191360, 46948290199551,
+ERASE, 46948290199552, 46948290199552,
+STORE, 46948290199552, 46948290207743,
+STORE, 46948290207744, 46948292046847,
+STORE, 46948290347008, 46948292046847,
+STORE, 46948290207744, 46948290347007,
+ERASE, 46948290347008, 46948290347008,
+STORE, 46948290347008, 46948292005887,
+STORE, 46948292005888, 46948292046847,
+STORE, 46948291690496, 46948292005887,
+STORE, 46948290347008, 46948291690495,
+ERASE, 46948290347008, 46948290347008,
+STORE, 46948290347008, 46948291690495,
+STORE, 46948292001792, 46948292005887,
+STORE, 46948291690496, 46948292001791,
+ERASE, 46948291690496, 46948291690496,
+STORE, 46948291690496, 46948292001791,
+STORE, 46948292030464, 46948292046847,
+STORE, 46948292005888, 46948292030463,
+ERASE, 46948292005888, 46948292005888,
+STORE, 46948292005888, 46948292030463,
+ERASE, 46948292030464, 46948292030464,
+STORE, 46948292030464, 46948292046847,
+STORE, 46948292030464, 46948292059135,
+ERASE, 46948292005888, 46948292005888,
+STORE, 46948292005888, 46948292022271,
+STORE, 46948292022272, 46948292030463,
+ERASE, 46948290191360, 46948290191360,
+STORE, 46948290191360, 46948290195455,
+STORE, 46948290195456, 46948290199551,
+ERASE, 94207146442752, 94207146442752,
+STORE, 94207146442752, 94207146459135,
+STORE, 94207146459136, 94207146463231,
+ERASE, 140684505071616, 140684505071616,
+STORE, 140684505071616, 140684505075711,
+STORE, 140684505075712, 140684505079807,
+ERASE, 46948290084864, 46948290084864,
+STORE, 140737488347136, 140737488351231,
+STORE, 140726367158272, 140737488351231,
+ERASE, 140726367158272, 140726367158272,
+STORE, 140726367158272, 140726367162367,
+STORE, 94436124106752, 94436124819455,
+ERASE, 94436124106752, 94436124106752,
+STORE, 94436124106752, 94436124155903,
+STORE, 94436124155904, 94436124819455,
+ERASE, 94436124155904, 94436124155904,
+STORE, 94436124155904, 94436124700671,
+STORE, 94436124700672, 94436124798975,
+STORE, 94436124798976, 94436124819455,
+STORE, 140049025044480, 140049025216511,
+ERASE, 140049025044480, 140049025044480,
+STORE, 140049025044480, 140049025048575,
+STORE, 140049025048576, 140049025216511,
+ERASE, 140049025048576, 140049025048576,
+STORE, 140049025048576, 140049025171455,
+STORE, 140049025171456, 140049025204223,
+STORE, 140049025204224, 140049025212415,
+STORE, 140049025212416, 140049025216511,
+STORE, 140726367256576, 140726367260671,
+STORE, 140726367244288, 140726367256575,
+STORE, 47583769952256, 47583769960447,
+STORE, 47583769960448, 47583769968639,
+STORE, 47583769968640, 47583770075135,
+STORE, 47583769985024, 47583770075135,
+STORE, 47583769968640, 47583769985023,
+ERASE, 47583769985024, 47583769985024,
+STORE, 47583769985024, 47583770058751,
+STORE, 47583770058752, 47583770075135,
+STORE, 47583770038272, 47583770058751,
+STORE, 47583769985024, 47583770038271,
+ERASE, 47583769985024, 47583769985024,
+STORE, 47583769985024, 47583770038271,
+STORE, 47583770054656, 47583770058751,
+STORE, 47583770038272, 47583770054655,
+ERASE, 47583770038272, 47583770038272,
+STORE, 47583770038272, 47583770054655,
+STORE, 47583770066944, 47583770075135,
+STORE, 47583770058752, 47583770066943,
+ERASE, 47583770058752, 47583770058752,
+STORE, 47583770058752, 47583770066943,
+ERASE, 47583770066944, 47583770066944,
+STORE, 47583770066944, 47583770075135,
+STORE, 47583770075136, 47583771914239,
+STORE, 47583770214400, 47583771914239,
+STORE, 47583770075136, 47583770214399,
+ERASE, 47583770214400, 47583770214400,
+STORE, 47583770214400, 47583771873279,
+STORE, 47583771873280, 47583771914239,
+STORE, 47583771557888, 47583771873279,
+STORE, 47583770214400, 47583771557887,
+ERASE, 47583770214400, 47583770214400,
+STORE, 47583770214400, 47583771557887,
+STORE, 47583771869184, 47583771873279,
+STORE, 47583771557888, 47583771869183,
+ERASE, 47583771557888, 47583771557888,
+STORE, 47583771557888, 47583771869183,
+STORE, 47583771897856, 47583771914239,
+STORE, 47583771873280, 47583771897855,
+ERASE, 47583771873280, 47583771873280,
+STORE, 47583771873280, 47583771897855,
+ERASE, 47583771897856, 47583771897856,
+STORE, 47583771897856, 47583771914239,
+STORE, 47583771897856, 47583771926527,
+ERASE, 47583771873280, 47583771873280,
+STORE, 47583771873280, 47583771889663,
+STORE, 47583771889664, 47583771897855,
+ERASE, 47583770058752, 47583770058752,
+STORE, 47583770058752, 47583770062847,
+STORE, 47583770062848, 47583770066943,
+ERASE, 94436124798976, 94436124798976,
+STORE, 94436124798976, 94436124815359,
+STORE, 94436124815360, 94436124819455,
+ERASE, 140049025204224, 140049025204224,
+STORE, 140049025204224, 140049025208319,
+STORE, 140049025208320, 140049025212415,
+ERASE, 47583769952256, 47583769952256,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727116099584, 140737488351231,
+ERASE, 140727116099584, 140727116099584,
+STORE, 140727116099584, 140727116103679,
+STORE, 94166319734784, 94166320447487,
+ERASE, 94166319734784, 94166319734784,
+STORE, 94166319734784, 94166319783935,
+STORE, 94166319783936, 94166320447487,
+ERASE, 94166319783936, 94166319783936,
+STORE, 94166319783936, 94166320328703,
+STORE, 94166320328704, 94166320427007,
+STORE, 94166320427008, 94166320447487,
+STORE, 139976559542272, 139976559714303,
+ERASE, 139976559542272, 139976559542272,
+STORE, 139976559542272, 139976559546367,
+STORE, 139976559546368, 139976559714303,
+ERASE, 139976559546368, 139976559546368,
+STORE, 139976559546368, 139976559669247,
+STORE, 139976559669248, 139976559702015,
+STORE, 139976559702016, 139976559710207,
+STORE, 139976559710208, 139976559714303,
+STORE, 140727116222464, 140727116226559,
+STORE, 140727116210176, 140727116222463,
+STORE, 47656235454464, 47656235462655,
+STORE, 47656235462656, 47656235470847,
+STORE, 47656235470848, 47656235577343,
+STORE, 47656235487232, 47656235577343,
+STORE, 47656235470848, 47656235487231,
+ERASE, 47656235487232, 47656235487232,
+STORE, 47656235487232, 47656235560959,
+STORE, 47656235560960, 47656235577343,
+STORE, 47656235540480, 47656235560959,
+STORE, 47656235487232, 47656235540479,
+ERASE, 47656235487232, 47656235487232,
+STORE, 47656235487232, 47656235540479,
+STORE, 47656235556864, 47656235560959,
+STORE, 47656235540480, 47656235556863,
+ERASE, 47656235540480, 47656235540480,
+STORE, 47656235540480, 47656235556863,
+STORE, 47656235569152, 47656235577343,
+STORE, 47656235560960, 47656235569151,
+ERASE, 47656235560960, 47656235560960,
+STORE, 47656235560960, 47656235569151,
+ERASE, 47656235569152, 47656235569152,
+STORE, 47656235569152, 47656235577343,
+STORE, 47656235577344, 47656237416447,
+STORE, 47656235716608, 47656237416447,
+STORE, 47656235577344, 47656235716607,
+ERASE, 47656235716608, 47656235716608,
+STORE, 47656235716608, 47656237375487,
+STORE, 47656237375488, 47656237416447,
+STORE, 47656237060096, 47656237375487,
+STORE, 47656235716608, 47656237060095,
+ERASE, 47656235716608, 47656235716608,
+STORE, 47656235716608, 47656237060095,
+STORE, 47656237371392, 47656237375487,
+STORE, 47656237060096, 47656237371391,
+ERASE, 47656237060096, 47656237060096,
+STORE, 47656237060096, 47656237371391,
+STORE, 47656237400064, 47656237416447,
+STORE, 47656237375488, 47656237400063,
+ERASE, 47656237375488, 47656237375488,
+STORE, 47656237375488, 47656237400063,
+ERASE, 47656237400064, 47656237400064,
+STORE, 47656237400064, 47656237416447,
+STORE, 47656237400064, 47656237428735,
+ERASE, 47656237375488, 47656237375488,
+STORE, 47656237375488, 47656237391871,
+STORE, 47656237391872, 47656237400063,
+ERASE, 47656235560960, 47656235560960,
+STORE, 47656235560960, 47656235565055,
+STORE, 47656235565056, 47656235569151,
+ERASE, 94166320427008, 94166320427008,
+STORE, 94166320427008, 94166320443391,
+STORE, 94166320443392, 94166320447487,
+ERASE, 139976559702016, 139976559702016,
+STORE, 139976559702016, 139976559706111,
+STORE, 139976559706112, 139976559710207,
+ERASE, 47656235454464, 47656235454464,
+STORE, 94166332153856, 94166332289023,
+STORE, 140737488347136, 140737488351231,
+STORE, 140726412816384, 140737488351231,
+ERASE, 140726412816384, 140726412816384,
+STORE, 140726412816384, 140726412820479,
+STORE, 94094884507648, 94094885220351,
+ERASE, 94094884507648, 94094884507648,
+STORE, 94094884507648, 94094884556799,
+STORE, 94094884556800, 94094885220351,
+ERASE, 94094884556800, 94094884556800,
+STORE, 94094884556800, 94094885101567,
+STORE, 94094885101568, 94094885199871,
+STORE, 94094885199872, 94094885220351,
+STORE, 139773773938688, 139773774110719,
+ERASE, 139773773938688, 139773773938688,
+STORE, 139773773938688, 139773773942783,
+STORE, 139773773942784, 139773774110719,
+ERASE, 139773773942784, 139773773942784,
+STORE, 139773773942784, 139773774065663,
+STORE, 139773774065664, 139773774098431,
+STORE, 139773774098432, 139773774106623,
+STORE, 139773774106624, 139773774110719,
+STORE, 140726412963840, 140726412967935,
+STORE, 140726412951552, 140726412963839,
+STORE, 47859021058048, 47859021066239,
+STORE, 47859021066240, 47859021074431,
+STORE, 47859021074432, 47859021180927,
+STORE, 47859021090816, 47859021180927,
+STORE, 47859021074432, 47859021090815,
+ERASE, 47859021090816, 47859021090816,
+STORE, 47859021090816, 47859021164543,
+STORE, 47859021164544, 47859021180927,
+STORE, 47859021144064, 47859021164543,
+STORE, 47859021090816, 47859021144063,
+ERASE, 47859021090816, 47859021090816,
+STORE, 47859021090816, 47859021144063,
+STORE, 47859021160448, 47859021164543,
+STORE, 47859021144064, 47859021160447,
+ERASE, 47859021144064, 47859021144064,
+STORE, 47859021144064, 47859021160447,
+STORE, 47859021172736, 47859021180927,
+STORE, 47859021164544, 47859021172735,
+ERASE, 47859021164544, 47859021164544,
+STORE, 47859021164544, 47859021172735,
+ERASE, 47859021172736, 47859021172736,
+STORE, 47859021172736, 47859021180927,
+STORE, 47859021180928, 47859023020031,
+STORE, 47859021320192, 47859023020031,
+STORE, 47859021180928, 47859021320191,
+ERASE, 47859021320192, 47859021320192,
+STORE, 47859021320192, 47859022979071,
+STORE, 47859022979072, 47859023020031,
+STORE, 47859022663680, 47859022979071,
+STORE, 47859021320192, 47859022663679,
+ERASE, 47859021320192, 47859021320192,
+STORE, 47859021320192, 47859022663679,
+STORE, 47859022974976, 47859022979071,
+STORE, 47859022663680, 47859022974975,
+ERASE, 47859022663680, 47859022663680,
+STORE, 47859022663680, 47859022974975,
+STORE, 47859023003648, 47859023020031,
+STORE, 47859022979072, 47859023003647,
+ERASE, 47859022979072, 47859022979072,
+STORE, 47859022979072, 47859023003647,
+ERASE, 47859023003648, 47859023003648,
+STORE, 47859023003648, 47859023020031,
+STORE, 47859023003648, 47859023032319,
+ERASE, 47859022979072, 47859022979072,
+STORE, 47859022979072, 47859022995455,
+STORE, 47859022995456, 47859023003647,
+ERASE, 47859021164544, 47859021164544,
+STORE, 47859021164544, 47859021168639,
+STORE, 47859021168640, 47859021172735,
+ERASE, 94094885199872, 94094885199872,
+STORE, 94094885199872, 94094885216255,
+STORE, 94094885216256, 94094885220351,
+ERASE, 139773774098432, 139773774098432,
+STORE, 139773774098432, 139773774102527,
+STORE, 139773774102528, 139773774106623,
+ERASE, 47859021058048, 47859021058048,
+STORE, 94094901108736, 94094901243903,
+STORE, 140737488347136, 140737488351231,
+STORE, 140736567963648, 140737488351231,
+ERASE, 140736567963648, 140736567963648,
+STORE, 140736567963648, 140736567967743,
+STORE, 94924425748480, 94924426461183,
+ERASE, 94924425748480, 94924425748480,
+STORE, 94924425748480, 94924425797631,
+STORE, 94924425797632, 94924426461183,
+ERASE, 94924425797632, 94924425797632,
+STORE, 94924425797632, 94924426342399,
+STORE, 94924426342400, 94924426440703,
+STORE, 94924426440704, 94924426461183,
+STORE, 140042126319616, 140042126491647,
+ERASE, 140042126319616, 140042126319616,
+STORE, 140042126319616, 140042126323711,
+STORE, 140042126323712, 140042126491647,
+ERASE, 140042126323712, 140042126323712,
+STORE, 140042126323712, 140042126446591,
+STORE, 140042126446592, 140042126479359,
+STORE, 140042126479360, 140042126487551,
+STORE, 140042126487552, 140042126491647,
+STORE, 140736568672256, 140736568676351,
+STORE, 140736568659968, 140736568672255,
+STORE, 47590668677120, 47590668685311,
+STORE, 47590668685312, 47590668693503,
+STORE, 47590668693504, 47590668799999,
+STORE, 47590668709888, 47590668799999,
+STORE, 47590668693504, 47590668709887,
+ERASE, 47590668709888, 47590668709888,
+STORE, 47590668709888, 47590668783615,
+STORE, 47590668783616, 47590668799999,
+STORE, 47590668763136, 47590668783615,
+STORE, 47590668709888, 47590668763135,
+ERASE, 47590668709888, 47590668709888,
+STORE, 47590668709888, 47590668763135,
+STORE, 47590668779520, 47590668783615,
+STORE, 47590668763136, 47590668779519,
+ERASE, 47590668763136, 47590668763136,
+STORE, 47590668763136, 47590668779519,
+STORE, 47590668791808, 47590668799999,
+STORE, 47590668783616, 47590668791807,
+ERASE, 47590668783616, 47590668783616,
+STORE, 47590668783616, 47590668791807,
+ERASE, 47590668791808, 47590668791808,
+STORE, 47590668791808, 47590668799999,
+STORE, 47590668800000, 47590670639103,
+STORE, 47590668939264, 47590670639103,
+STORE, 47590668800000, 47590668939263,
+ERASE, 47590668939264, 47590668939264,
+STORE, 47590668939264, 47590670598143,
+STORE, 47590670598144, 47590670639103,
+STORE, 47590670282752, 47590670598143,
+STORE, 47590668939264, 47590670282751,
+ERASE, 47590668939264, 47590668939264,
+STORE, 47590668939264, 47590670282751,
+STORE, 47590670594048, 47590670598143,
+STORE, 47590670282752, 47590670594047,
+ERASE, 47590670282752, 47590670282752,
+STORE, 47590670282752, 47590670594047,
+STORE, 47590670622720, 47590670639103,
+STORE, 47590670598144, 47590670622719,
+ERASE, 47590670598144, 47590670598144,
+STORE, 47590670598144, 47590670622719,
+ERASE, 47590670622720, 47590670622720,
+STORE, 47590670622720, 47590670639103,
+STORE, 47590670622720, 47590670651391,
+ERASE, 47590670598144, 47590670598144,
+STORE, 47590670598144, 47590670614527,
+STORE, 47590670614528, 47590670622719,
+ERASE, 47590668783616, 47590668783616,
+STORE, 47590668783616, 47590668787711,
+STORE, 47590668787712, 47590668791807,
+ERASE, 94924426440704, 94924426440704,
+STORE, 94924426440704, 94924426457087,
+STORE, 94924426457088, 94924426461183,
+ERASE, 140042126479360, 140042126479360,
+STORE, 140042126479360, 140042126483455,
+STORE, 140042126483456, 140042126487551,
+ERASE, 47590668677120, 47590668677120,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733281439744, 140737488351231,
+ERASE, 140733281439744, 140733281439744,
+STORE, 140733281439744, 140733281443839,
+STORE, 94490667069440, 94490667782143,
+ERASE, 94490667069440, 94490667069440,
+STORE, 94490667069440, 94490667118591,
+STORE, 94490667118592, 94490667782143,
+ERASE, 94490667118592, 94490667118592,
+STORE, 94490667118592, 94490667663359,
+STORE, 94490667663360, 94490667761663,
+STORE, 94490667761664, 94490667782143,
+STORE, 139878215118848, 139878215290879,
+ERASE, 139878215118848, 139878215118848,
+STORE, 139878215118848, 139878215122943,
+STORE, 139878215122944, 139878215290879,
+ERASE, 139878215122944, 139878215122944,
+STORE, 139878215122944, 139878215245823,
+STORE, 139878215245824, 139878215278591,
+STORE, 139878215278592, 139878215286783,
+STORE, 139878215286784, 139878215290879,
+STORE, 140733281464320, 140733281468415,
+STORE, 140733281452032, 140733281464319,
+STORE, 47754579877888, 47754579886079,
+STORE, 47754579886080, 47754579894271,
+STORE, 47754579894272, 47754580000767,
+STORE, 47754579910656, 47754580000767,
+STORE, 47754579894272, 47754579910655,
+ERASE, 47754579910656, 47754579910656,
+STORE, 47754579910656, 47754579984383,
+STORE, 47754579984384, 47754580000767,
+STORE, 47754579963904, 47754579984383,
+STORE, 47754579910656, 47754579963903,
+ERASE, 47754579910656, 47754579910656,
+STORE, 47754579910656, 47754579963903,
+STORE, 47754579980288, 47754579984383,
+STORE, 47754579963904, 47754579980287,
+ERASE, 47754579963904, 47754579963904,
+STORE, 47754579963904, 47754579980287,
+STORE, 47754579992576, 47754580000767,
+STORE, 47754579984384, 47754579992575,
+ERASE, 47754579984384, 47754579984384,
+STORE, 47754579984384, 47754579992575,
+ERASE, 47754579992576, 47754579992576,
+STORE, 47754579992576, 47754580000767,
+STORE, 47754580000768, 47754581839871,
+STORE, 47754580140032, 47754581839871,
+STORE, 47754580000768, 47754580140031,
+ERASE, 47754580140032, 47754580140032,
+STORE, 47754580140032, 47754581798911,
+STORE, 47754581798912, 47754581839871,
+STORE, 47754581483520, 47754581798911,
+STORE, 47754580140032, 47754581483519,
+ERASE, 47754580140032, 47754580140032,
+STORE, 47754580140032, 47754581483519,
+STORE, 47754581794816, 47754581798911,
+STORE, 47754581483520, 47754581794815,
+ERASE, 47754581483520, 47754581483520,
+STORE, 47754581483520, 47754581794815,
+STORE, 47754581823488, 47754581839871,
+STORE, 47754581798912, 47754581823487,
+ERASE, 47754581798912, 47754581798912,
+STORE, 47754581798912, 47754581823487,
+ERASE, 47754581823488, 47754581823488,
+STORE, 47754581823488, 47754581839871,
+STORE, 47754581823488, 47754581852159,
+ERASE, 47754581798912, 47754581798912,
+STORE, 47754581798912, 47754581815295,
+STORE, 47754581815296, 47754581823487,
+ERASE, 47754579984384, 47754579984384,
+STORE, 47754579984384, 47754579988479,
+STORE, 47754579988480, 47754579992575,
+ERASE, 94490667761664, 94490667761664,
+STORE, 94490667761664, 94490667778047,
+STORE, 94490667778048, 94490667782143,
+ERASE, 139878215278592, 139878215278592,
+STORE, 139878215278592, 139878215282687,
+STORE, 139878215282688, 139878215286783,
+ERASE, 47754579877888, 47754579877888,
+STORE, 94490669649920, 94490669785087,
+STORE, 140737488347136, 140737488351231,
+STORE, 140735382188032, 140737488351231,
+ERASE, 140735382188032, 140735382188032,
+STORE, 140735382188032, 140735382192127,
+STORE, 94150181302272, 94150182014975,
+ERASE, 94150181302272, 94150181302272,
+STORE, 94150181302272, 94150181351423,
+STORE, 94150181351424, 94150182014975,
+ERASE, 94150181351424, 94150181351424,
+STORE, 94150181351424, 94150181896191,
+STORE, 94150181896192, 94150181994495,
+STORE, 94150181994496, 94150182014975,
+STORE, 139679752458240, 139679752630271,
+ERASE, 139679752458240, 139679752458240,
+STORE, 139679752458240, 139679752462335,
+STORE, 139679752462336, 139679752630271,
+ERASE, 139679752462336, 139679752462336,
+STORE, 139679752462336, 139679752585215,
+STORE, 139679752585216, 139679752617983,
+STORE, 139679752617984, 139679752626175,
+STORE, 139679752626176, 139679752630271,
+STORE, 140735382536192, 140735382540287,
+STORE, 140735382523904, 140735382536191,
+STORE, 47953042538496, 47953042546687,
+STORE, 47953042546688, 47953042554879,
+STORE, 47953042554880, 47953042661375,
+STORE, 47953042571264, 47953042661375,
+STORE, 47953042554880, 47953042571263,
+ERASE, 47953042571264, 47953042571264,
+STORE, 47953042571264, 47953042644991,
+STORE, 47953042644992, 47953042661375,
+STORE, 47953042624512, 47953042644991,
+STORE, 47953042571264, 47953042624511,
+ERASE, 47953042571264, 47953042571264,
+STORE, 47953042571264, 47953042624511,
+STORE, 47953042640896, 47953042644991,
+STORE, 47953042624512, 47953042640895,
+ERASE, 47953042624512, 47953042624512,
+STORE, 47953042624512, 47953042640895,
+STORE, 47953042653184, 47953042661375,
+STORE, 47953042644992, 47953042653183,
+ERASE, 47953042644992, 47953042644992,
+STORE, 47953042644992, 47953042653183,
+ERASE, 47953042653184, 47953042653184,
+STORE, 47953042653184, 47953042661375,
+STORE, 47953042661376, 47953044500479,
+STORE, 47953042800640, 47953044500479,
+STORE, 47953042661376, 47953042800639,
+ERASE, 47953042800640, 47953042800640,
+STORE, 47953042800640, 47953044459519,
+STORE, 47953044459520, 47953044500479,
+STORE, 47953044144128, 47953044459519,
+STORE, 47953042800640, 47953044144127,
+ERASE, 47953042800640, 47953042800640,
+STORE, 47953042800640, 47953044144127,
+STORE, 47953044455424, 47953044459519,
+STORE, 47953044144128, 47953044455423,
+ERASE, 47953044144128, 47953044144128,
+STORE, 47953044144128, 47953044455423,
+STORE, 47953044484096, 47953044500479,
+STORE, 47953044459520, 47953044484095,
+ERASE, 47953044459520, 47953044459520,
+STORE, 47953044459520, 47953044484095,
+ERASE, 47953044484096, 47953044484096,
+STORE, 47953044484096, 47953044500479,
+STORE, 47953044484096, 47953044512767,
+ERASE, 47953044459520, 47953044459520,
+STORE, 47953044459520, 47953044475903,
+STORE, 47953044475904, 47953044484095,
+ERASE, 47953042644992, 47953042644992,
+STORE, 47953042644992, 47953042649087,
+STORE, 47953042649088, 47953042653183,
+ERASE, 94150181994496, 94150181994496,
+STORE, 94150181994496, 94150182010879,
+STORE, 94150182010880, 94150182014975,
+ERASE, 139679752617984, 139679752617984,
+STORE, 139679752617984, 139679752622079,
+STORE, 139679752622080, 139679752626175,
+ERASE, 47953042538496, 47953042538496,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737044123648, 140737488351231,
+ERASE, 140737044123648, 140737044123648,
+STORE, 140737044123648, 140737044127743,
+STORE, 94425324294144, 94425325006847,
+ERASE, 94425324294144, 94425324294144,
+STORE, 94425324294144, 94425324343295,
+STORE, 94425324343296, 94425325006847,
+ERASE, 94425324343296, 94425324343296,
+STORE, 94425324343296, 94425324888063,
+STORE, 94425324888064, 94425324986367,
+STORE, 94425324986368, 94425325006847,
+STORE, 140382015016960, 140382015188991,
+ERASE, 140382015016960, 140382015016960,
+STORE, 140382015016960, 140382015021055,
+STORE, 140382015021056, 140382015188991,
+ERASE, 140382015021056, 140382015021056,
+STORE, 140382015021056, 140382015143935,
+STORE, 140382015143936, 140382015176703,
+STORE, 140382015176704, 140382015184895,
+STORE, 140382015184896, 140382015188991,
+STORE, 140737045585920, 140737045590015,
+STORE, 140737045573632, 140737045585919,
+STORE, 47250779979776, 47250779987967,
+STORE, 47250779987968, 47250779996159,
+STORE, 47250779996160, 47250780102655,
+STORE, 47250780012544, 47250780102655,
+STORE, 47250779996160, 47250780012543,
+ERASE, 47250780012544, 47250780012544,
+STORE, 47250780012544, 47250780086271,
+STORE, 47250780086272, 47250780102655,
+STORE, 47250780065792, 47250780086271,
+STORE, 47250780012544, 47250780065791,
+ERASE, 47250780012544, 47250780012544,
+STORE, 47250780012544, 47250780065791,
+STORE, 47250780082176, 47250780086271,
+STORE, 47250780065792, 47250780082175,
+ERASE, 47250780065792, 47250780065792,
+STORE, 47250780065792, 47250780082175,
+STORE, 47250780094464, 47250780102655,
+STORE, 47250780086272, 47250780094463,
+ERASE, 47250780086272, 47250780086272,
+STORE, 47250780086272, 47250780094463,
+ERASE, 47250780094464, 47250780094464,
+STORE, 47250780094464, 47250780102655,
+STORE, 47250780102656, 47250781941759,
+STORE, 47250780241920, 47250781941759,
+STORE, 47250780102656, 47250780241919,
+ERASE, 47250780241920, 47250780241920,
+STORE, 47250780241920, 47250781900799,
+STORE, 47250781900800, 47250781941759,
+STORE, 47250781585408, 47250781900799,
+STORE, 47250780241920, 47250781585407,
+ERASE, 47250780241920, 47250780241920,
+STORE, 47250780241920, 47250781585407,
+STORE, 47250781896704, 47250781900799,
+STORE, 47250781585408, 47250781896703,
+ERASE, 47250781585408, 47250781585408,
+STORE, 47250781585408, 47250781896703,
+STORE, 47250781925376, 47250781941759,
+STORE, 47250781900800, 47250781925375,
+ERASE, 47250781900800, 47250781900800,
+STORE, 47250781900800, 47250781925375,
+ERASE, 47250781925376, 47250781925376,
+STORE, 47250781925376, 47250781941759,
+STORE, 47250781925376, 47250781954047,
+ERASE, 47250781900800, 47250781900800,
+STORE, 47250781900800, 47250781917183,
+STORE, 47250781917184, 47250781925375,
+ERASE, 47250780086272, 47250780086272,
+STORE, 47250780086272, 47250780090367,
+STORE, 47250780090368, 47250780094463,
+ERASE, 94425324986368, 94425324986368,
+STORE, 94425324986368, 94425325002751,
+STORE, 94425325002752, 94425325006847,
+ERASE, 140382015176704, 140382015176704,
+STORE, 140382015176704, 140382015180799,
+STORE, 140382015180800, 140382015184895,
+ERASE, 47250779979776, 47250779979776,
+STORE, 94425351438336, 94425351573503,
+STORE, 140737488347136, 140737488351231,
+STORE, 140736801144832, 140737488351231,
+ERASE, 140736801144832, 140736801144832,
+STORE, 140736801144832, 140736801148927,
+STORE, 94629429358592, 94629430071295,
+ERASE, 94629429358592, 94629429358592,
+STORE, 94629429358592, 94629429407743,
+STORE, 94629429407744, 94629430071295,
+ERASE, 94629429407744, 94629429407744,
+STORE, 94629429407744, 94629429952511,
+STORE, 94629429952512, 94629430050815,
+STORE, 94629430050816, 94629430071295,
+STORE, 139801685483520, 139801685655551,
+ERASE, 139801685483520, 139801685483520,
+STORE, 139801685483520, 139801685487615,
+STORE, 139801685487616, 139801685655551,
+ERASE, 139801685487616, 139801685487616,
+STORE, 139801685487616, 139801685610495,
+STORE, 139801685610496, 139801685643263,
+STORE, 139801685643264, 139801685651455,
+STORE, 139801685651456, 139801685655551,
+STORE, 140736801198080, 140736801202175,
+STORE, 140736801185792, 140736801198079,
+STORE, 47831109513216, 47831109521407,
+STORE, 47831109521408, 47831109529599,
+STORE, 47831109529600, 47831109636095,
+STORE, 47831109545984, 47831109636095,
+STORE, 47831109529600, 47831109545983,
+ERASE, 47831109545984, 47831109545984,
+STORE, 47831109545984, 47831109619711,
+STORE, 47831109619712, 47831109636095,
+STORE, 47831109599232, 47831109619711,
+STORE, 47831109545984, 47831109599231,
+ERASE, 47831109545984, 47831109545984,
+STORE, 47831109545984, 47831109599231,
+STORE, 47831109615616, 47831109619711,
+STORE, 47831109599232, 47831109615615,
+ERASE, 47831109599232, 47831109599232,
+STORE, 47831109599232, 47831109615615,
+STORE, 47831109627904, 47831109636095,
+STORE, 47831109619712, 47831109627903,
+ERASE, 47831109619712, 47831109619712,
+STORE, 47831109619712, 47831109627903,
+ERASE, 47831109627904, 47831109627904,
+STORE, 47831109627904, 47831109636095,
+STORE, 47831109636096, 47831111475199,
+STORE, 47831109775360, 47831111475199,
+STORE, 47831109636096, 47831109775359,
+ERASE, 47831109775360, 47831109775360,
+STORE, 47831109775360, 47831111434239,
+STORE, 47831111434240, 47831111475199,
+STORE, 47831111118848, 47831111434239,
+STORE, 47831109775360, 47831111118847,
+ERASE, 47831109775360, 47831109775360,
+STORE, 47831109775360, 47831111118847,
+STORE, 47831111430144, 47831111434239,
+STORE, 47831111118848, 47831111430143,
+ERASE, 47831111118848, 47831111118848,
+STORE, 47831111118848, 47831111430143,
+STORE, 47831111458816, 47831111475199,
+STORE, 47831111434240, 47831111458815,
+ERASE, 47831111434240, 47831111434240,
+STORE, 47831111434240, 47831111458815,
+ERASE, 47831111458816, 47831111458816,
+STORE, 47831111458816, 47831111475199,
+STORE, 47831111458816, 47831111487487,
+ERASE, 47831111434240, 47831111434240,
+STORE, 47831111434240, 47831111450623,
+STORE, 47831111450624, 47831111458815,
+ERASE, 47831109619712, 47831109619712,
+STORE, 47831109619712, 47831109623807,
+STORE, 47831109623808, 47831109627903,
+ERASE, 94629430050816, 94629430050816,
+STORE, 94629430050816, 94629430067199,
+STORE, 94629430067200, 94629430071295,
+ERASE, 139801685643264, 139801685643264,
+STORE, 139801685643264, 139801685647359,
+STORE, 139801685647360, 139801685651455,
+ERASE, 47831109513216, 47831109513216,
+STORE, 140737488347136, 140737488351231,
+STORE, 140729419612160, 140737488351231,
+ERASE, 140729419612160, 140729419612160,
+STORE, 140729419612160, 140729419616255,
+STORE, 94443354148864, 94443354861567,
+ERASE, 94443354148864, 94443354148864,
+STORE, 94443354148864, 94443354198015,
+STORE, 94443354198016, 94443354861567,
+ERASE, 94443354198016, 94443354198016,
+STORE, 94443354198016, 94443354742783,
+STORE, 94443354742784, 94443354841087,
+STORE, 94443354841088, 94443354861567,
+STORE, 139741700038656, 139741700210687,
+ERASE, 139741700038656, 139741700038656,
+STORE, 139741700038656, 139741700042751,
+STORE, 139741700042752, 139741700210687,
+ERASE, 139741700042752, 139741700042752,
+STORE, 139741700042752, 139741700165631,
+STORE, 139741700165632, 139741700198399,
+STORE, 139741700198400, 139741700206591,
+STORE, 139741700206592, 139741700210687,
+STORE, 140729420574720, 140729420578815,
+STORE, 140729420562432, 140729420574719,
+STORE, 47891094958080, 47891094966271,
+STORE, 47891094966272, 47891094974463,
+STORE, 47891094974464, 47891095080959,
+STORE, 47891094990848, 47891095080959,
+STORE, 47891094974464, 47891094990847,
+ERASE, 47891094990848, 47891094990848,
+STORE, 47891094990848, 47891095064575,
+STORE, 47891095064576, 47891095080959,
+STORE, 47891095044096, 47891095064575,
+STORE, 47891094990848, 47891095044095,
+ERASE, 47891094990848, 47891094990848,
+STORE, 47891094990848, 47891095044095,
+STORE, 47891095060480, 47891095064575,
+STORE, 47891095044096, 47891095060479,
+ERASE, 47891095044096, 47891095044096,
+STORE, 47891095044096, 47891095060479,
+STORE, 47891095072768, 47891095080959,
+STORE, 47891095064576, 47891095072767,
+ERASE, 47891095064576, 47891095064576,
+STORE, 47891095064576, 47891095072767,
+ERASE, 47891095072768, 47891095072768,
+STORE, 47891095072768, 47891095080959,
+STORE, 47891095080960, 47891096920063,
+STORE, 47891095220224, 47891096920063,
+STORE, 47891095080960, 47891095220223,
+ERASE, 47891095220224, 47891095220224,
+STORE, 47891095220224, 47891096879103,
+STORE, 47891096879104, 47891096920063,
+STORE, 47891096563712, 47891096879103,
+STORE, 47891095220224, 47891096563711,
+ERASE, 47891095220224, 47891095220224,
+STORE, 47891095220224, 47891096563711,
+STORE, 47891096875008, 47891096879103,
+STORE, 47891096563712, 47891096875007,
+ERASE, 47891096563712, 47891096563712,
+STORE, 47891096563712, 47891096875007,
+STORE, 47891096903680, 47891096920063,
+STORE, 47891096879104, 47891096903679,
+ERASE, 47891096879104, 47891096879104,
+STORE, 47891096879104, 47891096903679,
+ERASE, 47891096903680, 47891096903680,
+STORE, 47891096903680, 47891096920063,
+STORE, 47891096903680, 47891096932351,
+ERASE, 47891096879104, 47891096879104,
+STORE, 47891096879104, 47891096895487,
+STORE, 47891096895488, 47891096903679,
+ERASE, 47891095064576, 47891095064576,
+STORE, 47891095064576, 47891095068671,
+STORE, 47891095068672, 47891095072767,
+ERASE, 94443354841088, 94443354841088,
+STORE, 94443354841088, 94443354857471,
+STORE, 94443354857472, 94443354861567,
+ERASE, 139741700198400, 139741700198400,
+STORE, 139741700198400, 139741700202495,
+STORE, 139741700202496, 139741700206591,
+ERASE, 47891094958080, 47891094958080,
+STORE, 94443360825344, 94443360960511,
+STORE, 140737488347136, 140737488351231,
+STORE, 140722961661952, 140737488351231,
+ERASE, 140722961661952, 140722961661952,
+STORE, 140722961661952, 140722961666047,
+STORE, 94878388944896, 94878389657599,
+ERASE, 94878388944896, 94878388944896,
+STORE, 94878388944896, 94878388994047,
+STORE, 94878388994048, 94878389657599,
+ERASE, 94878388994048, 94878388994048,
+STORE, 94878388994048, 94878389538815,
+STORE, 94878389538816, 94878389637119,
+STORE, 94878389637120, 94878389657599,
+STORE, 140210690056192, 140210690228223,
+ERASE, 140210690056192, 140210690056192,
+STORE, 140210690056192, 140210690060287,
+STORE, 140210690060288, 140210690228223,
+ERASE, 140210690060288, 140210690060288,
+STORE, 140210690060288, 140210690183167,
+STORE, 140210690183168, 140210690215935,
+STORE, 140210690215936, 140210690224127,
+STORE, 140210690224128, 140210690228223,
+STORE, 140722963148800, 140722963152895,
+STORE, 140722963136512, 140722963148799,
+STORE, 47422104940544, 47422104948735,
+STORE, 47422104948736, 47422104956927,
+STORE, 47422104956928, 47422105063423,
+STORE, 47422104973312, 47422105063423,
+STORE, 47422104956928, 47422104973311,
+ERASE, 47422104973312, 47422104973312,
+STORE, 47422104973312, 47422105047039,
+STORE, 47422105047040, 47422105063423,
+STORE, 47422105026560, 47422105047039,
+STORE, 47422104973312, 47422105026559,
+ERASE, 47422104973312, 47422104973312,
+STORE, 47422104973312, 47422105026559,
+STORE, 47422105042944, 47422105047039,
+STORE, 47422105026560, 47422105042943,
+ERASE, 47422105026560, 47422105026560,
+STORE, 47422105026560, 47422105042943,
+STORE, 47422105055232, 47422105063423,
+STORE, 47422105047040, 47422105055231,
+ERASE, 47422105047040, 47422105047040,
+STORE, 47422105047040, 47422105055231,
+ERASE, 47422105055232, 47422105055232,
+STORE, 47422105055232, 47422105063423,
+STORE, 47422105063424, 47422106902527,
+STORE, 47422105202688, 47422106902527,
+STORE, 47422105063424, 47422105202687,
+ERASE, 47422105202688, 47422105202688,
+STORE, 47422105202688, 47422106861567,
+STORE, 47422106861568, 47422106902527,
+STORE, 47422106546176, 47422106861567,
+STORE, 47422105202688, 47422106546175,
+ERASE, 47422105202688, 47422105202688,
+STORE, 47422105202688, 47422106546175,
+STORE, 47422106857472, 47422106861567,
+STORE, 47422106546176, 47422106857471,
+ERASE, 47422106546176, 47422106546176,
+STORE, 47422106546176, 47422106857471,
+STORE, 47422106886144, 47422106902527,
+STORE, 47422106861568, 47422106886143,
+ERASE, 47422106861568, 47422106861568,
+STORE, 47422106861568, 47422106886143,
+ERASE, 47422106886144, 47422106886144,
+STORE, 47422106886144, 47422106902527,
+STORE, 47422106886144, 47422106914815,
+ERASE, 47422106861568, 47422106861568,
+STORE, 47422106861568, 47422106877951,
+STORE, 47422106877952, 47422106886143,
+ERASE, 47422105047040, 47422105047040,
+STORE, 47422105047040, 47422105051135,
+STORE, 47422105051136, 47422105055231,
+ERASE, 94878389637120, 94878389637120,
+STORE, 94878389637120, 94878389653503,
+STORE, 94878389653504, 94878389657599,
+ERASE, 140210690215936, 140210690215936,
+STORE, 140210690215936, 140210690220031,
+STORE, 140210690220032, 140210690224127,
+ERASE, 47422104940544, 47422104940544,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727690309632, 140737488351231,
+ERASE, 140727690309632, 140727690309632,
+STORE, 140727690309632, 140727690313727,
+STORE, 94121892208640, 94121892921343,
+ERASE, 94121892208640, 94121892208640,
+STORE, 94121892208640, 94121892257791,
+STORE, 94121892257792, 94121892921343,
+ERASE, 94121892257792, 94121892257792,
+STORE, 94121892257792, 94121892802559,
+STORE, 94121892802560, 94121892900863,
+STORE, 94121892900864, 94121892921343,
+STORE, 140662438326272, 140662438498303,
+ERASE, 140662438326272, 140662438326272,
+STORE, 140662438326272, 140662438330367,
+STORE, 140662438330368, 140662438498303,
+ERASE, 140662438330368, 140662438330368,
+STORE, 140662438330368, 140662438453247,
+STORE, 140662438453248, 140662438486015,
+STORE, 140662438486016, 140662438494207,
+STORE, 140662438494208, 140662438498303,
+STORE, 140727690379264, 140727690383359,
+STORE, 140727690366976, 140727690379263,
+STORE, 46970356670464, 46970356678655,
+STORE, 46970356678656, 46970356686847,
+STORE, 46970356686848, 46970356793343,
+STORE, 46970356703232, 46970356793343,
+STORE, 46970356686848, 46970356703231,
+ERASE, 46970356703232, 46970356703232,
+STORE, 46970356703232, 46970356776959,
+STORE, 46970356776960, 46970356793343,
+STORE, 46970356756480, 46970356776959,
+STORE, 46970356703232, 46970356756479,
+ERASE, 46970356703232, 46970356703232,
+STORE, 46970356703232, 46970356756479,
+STORE, 46970356772864, 46970356776959,
+STORE, 46970356756480, 46970356772863,
+ERASE, 46970356756480, 46970356756480,
+STORE, 46970356756480, 46970356772863,
+STORE, 46970356785152, 46970356793343,
+STORE, 46970356776960, 46970356785151,
+ERASE, 46970356776960, 46970356776960,
+STORE, 46970356776960, 46970356785151,
+ERASE, 46970356785152, 46970356785152,
+STORE, 46970356785152, 46970356793343,
+STORE, 46970356793344, 46970358632447,
+STORE, 46970356932608, 46970358632447,
+STORE, 46970356793344, 46970356932607,
+ERASE, 46970356932608, 46970356932608,
+STORE, 46970356932608, 46970358591487,
+STORE, 46970358591488, 46970358632447,
+STORE, 46970358276096, 46970358591487,
+STORE, 46970356932608, 46970358276095,
+ERASE, 46970356932608, 46970356932608,
+STORE, 46970356932608, 46970358276095,
+STORE, 46970358587392, 46970358591487,
+STORE, 46970358276096, 46970358587391,
+ERASE, 46970358276096, 46970358276096,
+STORE, 46970358276096, 46970358587391,
+STORE, 46970358616064, 46970358632447,
+STORE, 46970358591488, 46970358616063,
+ERASE, 46970358591488, 46970358591488,
+STORE, 46970358591488, 46970358616063,
+ERASE, 46970358616064, 46970358616064,
+STORE, 46970358616064, 46970358632447,
+STORE, 46970358616064, 46970358644735,
+ERASE, 46970358591488, 46970358591488,
+STORE, 46970358591488, 46970358607871,
+STORE, 46970358607872, 46970358616063,
+ERASE, 46970356776960, 46970356776960,
+STORE, 46970356776960, 46970356781055,
+STORE, 46970356781056, 46970356785151,
+ERASE, 94121892900864, 94121892900864,
+STORE, 94121892900864, 94121892917247,
+STORE, 94121892917248, 94121892921343,
+ERASE, 140662438486016, 140662438486016,
+STORE, 140662438486016, 140662438490111,
+STORE, 140662438490112, 140662438494207,
+ERASE, 46970356670464, 46970356670464,
+STORE, 94121898610688, 94121898745855,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737189351424, 140737488351231,
+ERASE, 140737189351424, 140737189351424,
+STORE, 140737189351424, 140737189355519,
+STORE, 93847948832768, 93847949545471,
+ERASE, 93847948832768, 93847948832768,
+STORE, 93847948832768, 93847948881919,
+STORE, 93847948881920, 93847949545471,
+ERASE, 93847948881920, 93847948881920,
+STORE, 93847948881920, 93847949426687,
+STORE, 93847949426688, 93847949524991,
+STORE, 93847949524992, 93847949545471,
+STORE, 139698989985792, 139698990157823,
+ERASE, 139698989985792, 139698989985792,
+STORE, 139698989985792, 139698989989887,
+STORE, 139698989989888, 139698990157823,
+ERASE, 139698989989888, 139698989989888,
+STORE, 139698989989888, 139698990112767,
+STORE, 139698990112768, 139698990145535,
+STORE, 139698990145536, 139698990153727,
+STORE, 139698990153728, 139698990157823,
+STORE, 140737189744640, 140737189748735,
+STORE, 140737189732352, 140737189744639,
+STORE, 47933805010944, 47933805019135,
+STORE, 47933805019136, 47933805027327,
+STORE, 47933805027328, 47933805133823,
+STORE, 47933805043712, 47933805133823,
+STORE, 47933805027328, 47933805043711,
+ERASE, 47933805043712, 47933805043712,
+STORE, 47933805043712, 47933805117439,
+STORE, 47933805117440, 47933805133823,
+STORE, 47933805096960, 47933805117439,
+STORE, 47933805043712, 47933805096959,
+ERASE, 47933805043712, 47933805043712,
+STORE, 47933805043712, 47933805096959,
+STORE, 47933805113344, 47933805117439,
+STORE, 47933805096960, 47933805113343,
+ERASE, 47933805096960, 47933805096960,
+STORE, 47933805096960, 47933805113343,
+STORE, 47933805125632, 47933805133823,
+STORE, 47933805117440, 47933805125631,
+ERASE, 47933805117440, 47933805117440,
+STORE, 47933805117440, 47933805125631,
+ERASE, 47933805125632, 47933805125632,
+STORE, 47933805125632, 47933805133823,
+STORE, 47933805133824, 47933806972927,
+STORE, 47933805273088, 47933806972927,
+STORE, 47933805133824, 47933805273087,
+ERASE, 47933805273088, 47933805273088,
+STORE, 47933805273088, 47933806931967,
+STORE, 47933806931968, 47933806972927,
+STORE, 47933806616576, 47933806931967,
+STORE, 47933805273088, 47933806616575,
+ERASE, 47933805273088, 47933805273088,
+STORE, 47933805273088, 47933806616575,
+STORE, 47933806927872, 47933806931967,
+STORE, 47933806616576, 47933806927871,
+ERASE, 47933806616576, 47933806616576,
+STORE, 47933806616576, 47933806927871,
+STORE, 47933806956544, 47933806972927,
+STORE, 47933806931968, 47933806956543,
+ERASE, 47933806931968, 47933806931968,
+STORE, 47933806931968, 47933806956543,
+ERASE, 47933806956544, 47933806956544,
+STORE, 47933806956544, 47933806972927,
+STORE, 47933806956544, 47933806985215,
+ERASE, 47933806931968, 47933806931968,
+STORE, 47933806931968, 47933806948351,
+STORE, 47933806948352, 47933806956543,
+ERASE, 47933805117440, 47933805117440,
+STORE, 47933805117440, 47933805121535,
+STORE, 47933805121536, 47933805125631,
+ERASE, 93847949524992, 93847949524992,
+STORE, 93847949524992, 93847949541375,
+STORE, 93847949541376, 93847949545471,
+ERASE, 139698990145536, 139698990145536,
+STORE, 139698990145536, 139698990149631,
+STORE, 139698990149632, 139698990153727,
+ERASE, 47933805010944, 47933805010944,
+STORE, 140737488347136, 140737488351231,
+STORE, 140725553991680, 140737488351231,
+ERASE, 140725553991680, 140725553991680,
+STORE, 140725553991680, 140725553995775,
+STORE, 93980056248320, 93980056961023,
+ERASE, 93980056248320, 93980056248320,
+STORE, 93980056248320, 93980056297471,
+STORE, 93980056297472, 93980056961023,
+ERASE, 93980056297472, 93980056297472,
+STORE, 93980056297472, 93980056842239,
+STORE, 93980056842240, 93980056940543,
+STORE, 93980056940544, 93980056961023,
+STORE, 140146588971008, 140146589143039,
+ERASE, 140146588971008, 140146588971008,
+STORE, 140146588971008, 140146588975103,
+STORE, 140146588975104, 140146589143039,
+ERASE, 140146588975104, 140146588975104,
+STORE, 140146588975104, 140146589097983,
+STORE, 140146589097984, 140146589130751,
+STORE, 140146589130752, 140146589138943,
+STORE, 140146589138944, 140146589143039,
+STORE, 140725554860032, 140725554864127,
+STORE, 140725554847744, 140725554860031,
+STORE, 47486206025728, 47486206033919,
+STORE, 47486206033920, 47486206042111,
+STORE, 47486206042112, 47486206148607,
+STORE, 47486206058496, 47486206148607,
+STORE, 47486206042112, 47486206058495,
+ERASE, 47486206058496, 47486206058496,
+STORE, 47486206058496, 47486206132223,
+STORE, 47486206132224, 47486206148607,
+STORE, 47486206111744, 47486206132223,
+STORE, 47486206058496, 47486206111743,
+ERASE, 47486206058496, 47486206058496,
+STORE, 47486206058496, 47486206111743,
+STORE, 47486206128128, 47486206132223,
+STORE, 47486206111744, 47486206128127,
+ERASE, 47486206111744, 47486206111744,
+STORE, 47486206111744, 47486206128127,
+STORE, 47486206140416, 47486206148607,
+STORE, 47486206132224, 47486206140415,
+ERASE, 47486206132224, 47486206132224,
+STORE, 47486206132224, 47486206140415,
+ERASE, 47486206140416, 47486206140416,
+STORE, 47486206140416, 47486206148607,
+STORE, 47486206148608, 47486207987711,
+STORE, 47486206287872, 47486207987711,
+STORE, 47486206148608, 47486206287871,
+ERASE, 47486206287872, 47486206287872,
+STORE, 47486206287872, 47486207946751,
+STORE, 47486207946752, 47486207987711,
+STORE, 47486207631360, 47486207946751,
+STORE, 47486206287872, 47486207631359,
+ERASE, 47486206287872, 47486206287872,
+STORE, 47486206287872, 47486207631359,
+STORE, 47486207942656, 47486207946751,
+STORE, 47486207631360, 47486207942655,
+ERASE, 47486207631360, 47486207631360,
+STORE, 47486207631360, 47486207942655,
+STORE, 47486207971328, 47486207987711,
+STORE, 47486207946752, 47486207971327,
+ERASE, 47486207946752, 47486207946752,
+STORE, 47486207946752, 47486207971327,
+ERASE, 47486207971328, 47486207971328,
+STORE, 47486207971328, 47486207987711,
+STORE, 47486207971328, 47486207999999,
+ERASE, 47486207946752, 47486207946752,
+STORE, 47486207946752, 47486207963135,
+STORE, 47486207963136, 47486207971327,
+ERASE, 47486206132224, 47486206132224,
+STORE, 47486206132224, 47486206136319,
+STORE, 47486206136320, 47486206140415,
+ERASE, 93980056940544, 93980056940544,
+STORE, 93980056940544, 93980056956927,
+STORE, 93980056956928, 93980056961023,
+ERASE, 140146589130752, 140146589130752,
+STORE, 140146589130752, 140146589134847,
+STORE, 140146589134848, 140146589138943,
+ERASE, 47486206025728, 47486206025728,
+STORE, 93980070006784, 93980070141951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727334776832, 140737488351231,
+ERASE, 140727334776832, 140727334776832,
+STORE, 140727334776832, 140727334780927,
+STORE, 94049747247104, 94049747959807,
+ERASE, 94049747247104, 94049747247104,
+STORE, 94049747247104, 94049747296255,
+STORE, 94049747296256, 94049747959807,
+ERASE, 94049747296256, 94049747296256,
+STORE, 94049747296256, 94049747841023,
+STORE, 94049747841024, 94049747939327,
+STORE, 94049747939328, 94049747959807,
+STORE, 140227307216896, 140227307388927,
+ERASE, 140227307216896, 140227307216896,
+STORE, 140227307216896, 140227307220991,
+STORE, 140227307220992, 140227307388927,
+ERASE, 140227307220992, 140227307220992,
+STORE, 140227307220992, 140227307343871,
+STORE, 140227307343872, 140227307376639,
+STORE, 140227307376640, 140227307384831,
+STORE, 140227307384832, 140227307388927,
+STORE, 140727335337984, 140727335342079,
+STORE, 140727335325696, 140727335337983,
+STORE, 47405487779840, 47405487788031,
+STORE, 47405487788032, 47405487796223,
+STORE, 47405487796224, 47405487902719,
+STORE, 47405487812608, 47405487902719,
+STORE, 47405487796224, 47405487812607,
+ERASE, 47405487812608, 47405487812608,
+STORE, 47405487812608, 47405487886335,
+STORE, 47405487886336, 47405487902719,
+STORE, 47405487865856, 47405487886335,
+STORE, 47405487812608, 47405487865855,
+ERASE, 47405487812608, 47405487812608,
+STORE, 47405487812608, 47405487865855,
+STORE, 47405487882240, 47405487886335,
+STORE, 47405487865856, 47405487882239,
+ERASE, 47405487865856, 47405487865856,
+STORE, 47405487865856, 47405487882239,
+STORE, 47405487894528, 47405487902719,
+STORE, 47405487886336, 47405487894527,
+ERASE, 47405487886336, 47405487886336,
+STORE, 47405487886336, 47405487894527,
+ERASE, 47405487894528, 47405487894528,
+STORE, 47405487894528, 47405487902719,
+STORE, 47405487902720, 47405489741823,
+STORE, 47405488041984, 47405489741823,
+STORE, 47405487902720, 47405488041983,
+ERASE, 47405488041984, 47405488041984,
+STORE, 47405488041984, 47405489700863,
+STORE, 47405489700864, 47405489741823,
+STORE, 47405489385472, 47405489700863,
+STORE, 47405488041984, 47405489385471,
+ERASE, 47405488041984, 47405488041984,
+STORE, 47405488041984, 47405489385471,
+STORE, 47405489696768, 47405489700863,
+STORE, 47405489385472, 47405489696767,
+ERASE, 47405489385472, 47405489385472,
+STORE, 47405489385472, 47405489696767,
+STORE, 47405489725440, 47405489741823,
+STORE, 47405489700864, 47405489725439,
+ERASE, 47405489700864, 47405489700864,
+STORE, 47405489700864, 47405489725439,
+ERASE, 47405489725440, 47405489725440,
+STORE, 47405489725440, 47405489741823,
+STORE, 47405489725440, 47405489754111,
+ERASE, 47405489700864, 47405489700864,
+STORE, 47405489700864, 47405489717247,
+STORE, 47405489717248, 47405489725439,
+ERASE, 47405487886336, 47405487886336,
+STORE, 47405487886336, 47405487890431,
+STORE, 47405487890432, 47405487894527,
+ERASE, 94049747939328, 94049747939328,
+STORE, 94049747939328, 94049747955711,
+STORE, 94049747955712, 94049747959807,
+ERASE, 140227307376640, 140227307376640,
+STORE, 140227307376640, 140227307380735,
+STORE, 140227307380736, 140227307384831,
+ERASE, 47405487779840, 47405487779840,
+STORE, 94049758810112, 94049758945279,
+STORE, 140737488347136, 140737488351231,
+STORE, 140727079718912, 140737488351231,
+ERASE, 140727079718912, 140727079718912,
+STORE, 140727079718912, 140727079723007,
+STORE, 94250996527104, 94250997239807,
+ERASE, 94250996527104, 94250996527104,
+STORE, 94250996527104, 94250996576255,
+STORE, 94250996576256, 94250997239807,
+ERASE, 94250996576256, 94250996576256,
+STORE, 94250996576256, 94250997121023,
+STORE, 94250997121024, 94250997219327,
+STORE, 94250997219328, 94250997239807,
+STORE, 140060022587392, 140060022759423,
+ERASE, 140060022587392, 140060022587392,
+STORE, 140060022587392, 140060022591487,
+STORE, 140060022591488, 140060022759423,
+ERASE, 140060022591488, 140060022591488,
+STORE, 140060022591488, 140060022714367,
+STORE, 140060022714368, 140060022747135,
+STORE, 140060022747136, 140060022755327,
+STORE, 140060022755328, 140060022759423,
+STORE, 140727079788544, 140727079792639,
+STORE, 140727079776256, 140727079788543,
+STORE, 47572772409344, 47572772417535,
+STORE, 47572772417536, 47572772425727,
+STORE, 47572772425728, 47572772532223,
+STORE, 47572772442112, 47572772532223,
+STORE, 47572772425728, 47572772442111,
+ERASE, 47572772442112, 47572772442112,
+STORE, 47572772442112, 47572772515839,
+STORE, 47572772515840, 47572772532223,
+STORE, 47572772495360, 47572772515839,
+STORE, 47572772442112, 47572772495359,
+ERASE, 47572772442112, 47572772442112,
+STORE, 47572772442112, 47572772495359,
+STORE, 47572772511744, 47572772515839,
+STORE, 47572772495360, 47572772511743,
+ERASE, 47572772495360, 47572772495360,
+STORE, 47572772495360, 47572772511743,
+STORE, 47572772524032, 47572772532223,
+STORE, 47572772515840, 47572772524031,
+ERASE, 47572772515840, 47572772515840,
+STORE, 47572772515840, 47572772524031,
+ERASE, 47572772524032, 47572772524032,
+STORE, 47572772524032, 47572772532223,
+STORE, 47572772532224, 47572774371327,
+STORE, 47572772671488, 47572774371327,
+STORE, 47572772532224, 47572772671487,
+ERASE, 47572772671488, 47572772671488,
+STORE, 47572772671488, 47572774330367,
+STORE, 47572774330368, 47572774371327,
+STORE, 47572774014976, 47572774330367,
+STORE, 47572772671488, 47572774014975,
+ERASE, 47572772671488, 47572772671488,
+STORE, 47572772671488, 47572774014975,
+STORE, 47572774326272, 47572774330367,
+STORE, 47572774014976, 47572774326271,
+ERASE, 47572774014976, 47572774014976,
+STORE, 47572774014976, 47572774326271,
+STORE, 47572774354944, 47572774371327,
+STORE, 47572774330368, 47572774354943,
+ERASE, 47572774330368, 47572774330368,
+STORE, 47572774330368, 47572774354943,
+ERASE, 47572774354944, 47572774354944,
+STORE, 47572774354944, 47572774371327,
+STORE, 47572774354944, 47572774383615,
+ERASE, 47572774330368, 47572774330368,
+STORE, 47572774330368, 47572774346751,
+STORE, 47572774346752, 47572774354943,
+ERASE, 47572772515840, 47572772515840,
+STORE, 47572772515840, 47572772519935,
+STORE, 47572772519936, 47572772524031,
+ERASE, 94250997219328, 94250997219328,
+STORE, 94250997219328, 94250997235711,
+STORE, 94250997235712, 94250997239807,
+ERASE, 140060022747136, 140060022747136,
+STORE, 140060022747136, 140060022751231,
+STORE, 140060022751232, 140060022755327,
+ERASE, 47572772409344, 47572772409344,
+STORE, 94251018305536, 94251018440703,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730012389376, 140737488351231,
+ERASE, 140730012389376, 140730012389376,
+STORE, 140730012389376, 140730012393471,
+STORE, 94382607675392, 94382607695871,
+ERASE, 94382607675392, 94382607675392,
+STORE, 94382607675392, 94382607679487,
+STORE, 94382607679488, 94382607695871,
+ERASE, 94382607679488, 94382607679488,
+STORE, 94382607679488, 94382607683583,
+STORE, 94382607683584, 94382607687679,
+STORE, 94382607687680, 94382607695871,
+STORE, 140252451454976, 140252451627007,
+ERASE, 140252451454976, 140252451454976,
+STORE, 140252451454976, 140252451459071,
+STORE, 140252451459072, 140252451627007,
+ERASE, 140252451459072, 140252451459072,
+STORE, 140252451459072, 140252451581951,
+STORE, 140252451581952, 140252451614719,
+STORE, 140252451614720, 140252451622911,
+STORE, 140252451622912, 140252451627007,
+STORE, 140730013548544, 140730013552639,
+STORE, 140730013536256, 140730013548543,
+STORE, 47380343541760, 47380343549951,
+STORE, 47380343549952, 47380343558143,
+STORE, 47380343558144, 47380345397247,
+STORE, 47380343697408, 47380345397247,
+STORE, 47380343558144, 47380343697407,
+ERASE, 47380343697408, 47380343697408,
+STORE, 47380343697408, 47380345356287,
+STORE, 47380345356288, 47380345397247,
+STORE, 47380345040896, 47380345356287,
+STORE, 47380343697408, 47380345040895,
+ERASE, 47380343697408, 47380343697408,
+STORE, 47380343697408, 47380345040895,
+STORE, 47380345352192, 47380345356287,
+STORE, 47380345040896, 47380345352191,
+ERASE, 47380345040896, 47380345040896,
+STORE, 47380345040896, 47380345352191,
+STORE, 47380345380864, 47380345397247,
+STORE, 47380345356288, 47380345380863,
+ERASE, 47380345356288, 47380345356288,
+STORE, 47380345356288, 47380345380863,
+ERASE, 47380345380864, 47380345380864,
+STORE, 47380345380864, 47380345397247,
+ERASE, 47380345356288, 47380345356288,
+STORE, 47380345356288, 47380345372671,
+STORE, 47380345372672, 47380345380863,
+ERASE, 94382607687680, 94382607687680,
+STORE, 94382607687680, 94382607691775,
+STORE, 94382607691776, 94382607695871,
+ERASE, 140252451614720, 140252451614720,
+STORE, 140252451614720, 140252451618815,
+STORE, 140252451618816, 140252451622911,
+ERASE, 47380343541760, 47380343541760,
+STORE, 94382626803712, 94382626938879,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730900271104, 140737488351231,
+ERASE, 140730900271104, 140730900271104,
+STORE, 140730900271104, 140730900275199,
+STORE, 93855478120448, 93855478337535,
+ERASE, 93855478120448, 93855478120448,
+STORE, 93855478120448, 93855478198271,
+STORE, 93855478198272, 93855478337535,
+ERASE, 93855478198272, 93855478198272,
+STORE, 93855478198272, 93855478243327,
+STORE, 93855478243328, 93855478288383,
+STORE, 93855478288384, 93855478337535,
+STORE, 140092686573568, 140092686745599,
+ERASE, 140092686573568, 140092686573568,
+STORE, 140092686573568, 140092686577663,
+STORE, 140092686577664, 140092686745599,
+ERASE, 140092686577664, 140092686577664,
+STORE, 140092686577664, 140092686700543,
+STORE, 140092686700544, 140092686733311,
+STORE, 140092686733312, 140092686741503,
+STORE, 140092686741504, 140092686745599,
+STORE, 140730900537344, 140730900541439,
+STORE, 140730900525056, 140730900537343,
+STORE, 47540108423168, 47540108431359,
+STORE, 47540108431360, 47540108439551,
+STORE, 47540108439552, 47540110278655,
+STORE, 47540108578816, 47540110278655,
+STORE, 47540108439552, 47540108578815,
+ERASE, 47540108578816, 47540108578816,
+STORE, 47540108578816, 47540110237695,
+STORE, 47540110237696, 47540110278655,
+STORE, 47540109922304, 47540110237695,
+STORE, 47540108578816, 47540109922303,
+ERASE, 47540108578816, 47540108578816,
+STORE, 47540108578816, 47540109922303,
+STORE, 47540110233600, 47540110237695,
+STORE, 47540109922304, 47540110233599,
+ERASE, 47540109922304, 47540109922304,
+STORE, 47540109922304, 47540110233599,
+STORE, 47540110262272, 47540110278655,
+STORE, 47540110237696, 47540110262271,
+ERASE, 47540110237696, 47540110237696,
+STORE, 47540110237696, 47540110262271,
+ERASE, 47540110262272, 47540110262272,
+STORE, 47540110262272, 47540110278655,
+ERASE, 47540110237696, 47540110237696,
+STORE, 47540110237696, 47540110254079,
+STORE, 47540110254080, 47540110262271,
+ERASE, 93855478288384, 93855478288384,
+STORE, 93855478288384, 93855478333439,
+STORE, 93855478333440, 93855478337535,
+ERASE, 140092686733312, 140092686733312,
+STORE, 140092686733312, 140092686737407,
+STORE, 140092686737408, 140092686741503,
+ERASE, 47540108423168, 47540108423168,
+STORE, 93855492222976, 93855492358143,
+STORE, 93855492222976, 93855492493311,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733498146816, 140737488351231,
+ERASE, 140733498146816, 140733498146816,
+STORE, 140733498146816, 140733498150911,
+STORE, 94170739654656, 94170740367359,
+ERASE, 94170739654656, 94170739654656,
+STORE, 94170739654656, 94170739703807,
+STORE, 94170739703808, 94170740367359,
+ERASE, 94170739703808, 94170739703808,
+STORE, 94170739703808, 94170740248575,
+STORE, 94170740248576, 94170740346879,
+STORE, 94170740346880, 94170740367359,
+STORE, 140024788877312, 140024789049343,
+ERASE, 140024788877312, 140024788877312,
+STORE, 140024788877312, 140024788881407,
+STORE, 140024788881408, 140024789049343,
+ERASE, 140024788881408, 140024788881408,
+STORE, 140024788881408, 140024789004287,
+STORE, 140024789004288, 140024789037055,
+STORE, 140024789037056, 140024789045247,
+STORE, 140024789045248, 140024789049343,
+STORE, 140733499023360, 140733499027455,
+STORE, 140733499011072, 140733499023359,
+STORE, 47608006119424, 47608006127615,
+STORE, 47608006127616, 47608006135807,
+STORE, 47608006135808, 47608006242303,
+STORE, 47608006152192, 47608006242303,
+STORE, 47608006135808, 47608006152191,
+ERASE, 47608006152192, 47608006152192,
+STORE, 47608006152192, 47608006225919,
+STORE, 47608006225920, 47608006242303,
+STORE, 47608006205440, 47608006225919,
+STORE, 47608006152192, 47608006205439,
+ERASE, 47608006152192, 47608006152192,
+STORE, 47608006152192, 47608006205439,
+STORE, 47608006221824, 47608006225919,
+STORE, 47608006205440, 47608006221823,
+ERASE, 47608006205440, 47608006205440,
+STORE, 47608006205440, 47608006221823,
+STORE, 47608006234112, 47608006242303,
+STORE, 47608006225920, 47608006234111,
+ERASE, 47608006225920, 47608006225920,
+STORE, 47608006225920, 47608006234111,
+ERASE, 47608006234112, 47608006234112,
+STORE, 47608006234112, 47608006242303,
+STORE, 47608006242304, 47608008081407,
+STORE, 47608006381568, 47608008081407,
+STORE, 47608006242304, 47608006381567,
+ERASE, 47608006381568, 47608006381568,
+STORE, 47608006381568, 47608008040447,
+STORE, 47608008040448, 47608008081407,
+STORE, 47608007725056, 47608008040447,
+STORE, 47608006381568, 47608007725055,
+ERASE, 47608006381568, 47608006381568,
+STORE, 47608006381568, 47608007725055,
+STORE, 47608008036352, 47608008040447,
+STORE, 47608007725056, 47608008036351,
+ERASE, 47608007725056, 47608007725056,
+STORE, 47608007725056, 47608008036351,
+STORE, 47608008065024, 47608008081407,
+STORE, 47608008040448, 47608008065023,
+ERASE, 47608008040448, 47608008040448,
+STORE, 47608008040448, 47608008065023,
+ERASE, 47608008065024, 47608008065024,
+STORE, 47608008065024, 47608008081407,
+STORE, 47608008065024, 47608008093695,
+ERASE, 47608008040448, 47608008040448,
+STORE, 47608008040448, 47608008056831,
+STORE, 47608008056832, 47608008065023,
+ERASE, 47608006225920, 47608006225920,
+STORE, 47608006225920, 47608006230015,
+STORE, 47608006230016, 47608006234111,
+ERASE, 94170740346880, 94170740346880,
+STORE, 94170740346880, 94170740363263,
+STORE, 94170740363264, 94170740367359,
+ERASE, 140024789037056, 140024789037056,
+STORE, 140024789037056, 140024789041151,
+STORE, 140024789041152, 140024789045247,
+ERASE, 47608006119424, 47608006119424,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730264326144, 140737488351231,
+ERASE, 140730264326144, 140730264326144,
+STORE, 140730264326144, 140730264330239,
+STORE, 94653216407552, 94653217120255,
+ERASE, 94653216407552, 94653216407552,
+STORE, 94653216407552, 94653216456703,
+STORE, 94653216456704, 94653217120255,
+ERASE, 94653216456704, 94653216456704,
+STORE, 94653216456704, 94653217001471,
+STORE, 94653217001472, 94653217099775,
+STORE, 94653217099776, 94653217120255,
+STORE, 140103617011712, 140103617183743,
+ERASE, 140103617011712, 140103617011712,
+STORE, 140103617011712, 140103617015807,
+STORE, 140103617015808, 140103617183743,
+ERASE, 140103617015808, 140103617015808,
+STORE, 140103617015808, 140103617138687,
+STORE, 140103617138688, 140103617171455,
+STORE, 140103617171456, 140103617179647,
+STORE, 140103617179648, 140103617183743,
+STORE, 140730265427968, 140730265432063,
+STORE, 140730265415680, 140730265427967,
+STORE, 47529177985024, 47529177993215,
+STORE, 47529177993216, 47529178001407,
+STORE, 47529178001408, 47529178107903,
+STORE, 47529178017792, 47529178107903,
+STORE, 47529178001408, 47529178017791,
+ERASE, 47529178017792, 47529178017792,
+STORE, 47529178017792, 47529178091519,
+STORE, 47529178091520, 47529178107903,
+STORE, 47529178071040, 47529178091519,
+STORE, 47529178017792, 47529178071039,
+ERASE, 47529178017792, 47529178017792,
+STORE, 47529178017792, 47529178071039,
+STORE, 47529178087424, 47529178091519,
+STORE, 47529178071040, 47529178087423,
+ERASE, 47529178071040, 47529178071040,
+STORE, 47529178071040, 47529178087423,
+STORE, 47529178099712, 47529178107903,
+STORE, 47529178091520, 47529178099711,
+ERASE, 47529178091520, 47529178091520,
+STORE, 47529178091520, 47529178099711,
+ERASE, 47529178099712, 47529178099712,
+STORE, 47529178099712, 47529178107903,
+STORE, 47529178107904, 47529179947007,
+STORE, 47529178247168, 47529179947007,
+STORE, 47529178107904, 47529178247167,
+ERASE, 47529178247168, 47529178247168,
+STORE, 47529178247168, 47529179906047,
+STORE, 47529179906048, 47529179947007,
+STORE, 47529179590656, 47529179906047,
+STORE, 47529178247168, 47529179590655,
+ERASE, 47529178247168, 47529178247168,
+STORE, 47529178247168, 47529179590655,
+STORE, 47529179901952, 47529179906047,
+STORE, 47529179590656, 47529179901951,
+ERASE, 47529179590656, 47529179590656,
+STORE, 47529179590656, 47529179901951,
+STORE, 47529179930624, 47529179947007,
+STORE, 47529179906048, 47529179930623,
+ERASE, 47529179906048, 47529179906048,
+STORE, 47529179906048, 47529179930623,
+ERASE, 47529179930624, 47529179930624,
+STORE, 47529179930624, 47529179947007,
+STORE, 47529179930624, 47529179959295,
+ERASE, 47529179906048, 47529179906048,
+STORE, 47529179906048, 47529179922431,
+STORE, 47529179922432, 47529179930623,
+ERASE, 47529178091520, 47529178091520,
+STORE, 47529178091520, 47529178095615,
+STORE, 47529178095616, 47529178099711,
+ERASE, 94653217099776, 94653217099776,
+STORE, 94653217099776, 94653217116159,
+STORE, 94653217116160, 94653217120255,
+ERASE, 140103617171456, 140103617171456,
+STORE, 140103617171456, 140103617175551,
+STORE, 140103617175552, 140103617179647,
+ERASE, 47529177985024, 47529177985024,
+STORE, 94653241135104, 94653241270271,
+STORE, 140737488347136, 140737488351231,
+STORE, 140736284549120, 140737488351231,
+ERASE, 140736284549120, 140736284549120,
+STORE, 140736284549120, 140736284553215,
+STORE, 93963663822848, 93963664506879,
+ERASE, 93963663822848, 93963663822848,
+STORE, 93963663822848, 93963663884287,
+STORE, 93963663884288, 93963664506879,
+ERASE, 93963663884288, 93963663884288,
+STORE, 93963663884288, 93963664240639,
+STORE, 93963664240640, 93963664379903,
+STORE, 93963664379904, 93963664506879,
+STORE, 140450188439552, 140450188611583,
+ERASE, 140450188439552, 140450188439552,
+STORE, 140450188439552, 140450188443647,
+STORE, 140450188443648, 140450188611583,
+ERASE, 140450188443648, 140450188443648,
+STORE, 140450188443648, 140450188566527,
+STORE, 140450188566528, 140450188599295,
+STORE, 140450188599296, 140450188607487,
+STORE, 140450188607488, 140450188611583,
+STORE, 140736284577792, 140736284581887,
+STORE, 140736284565504, 140736284577791,
+STORE, 47182606557184, 47182606565375,
+STORE, 47182606565376, 47182606573567,
+STORE, 47182606573568, 47182608412671,
+STORE, 47182606712832, 47182608412671,
+STORE, 47182606573568, 47182606712831,
+ERASE, 47182606712832, 47182606712832,
+STORE, 47182606712832, 47182608371711,
+STORE, 47182608371712, 47182608412671,
+STORE, 47182608056320, 47182608371711,
+STORE, 47182606712832, 47182608056319,
+ERASE, 47182606712832, 47182606712832,
+STORE, 47182606712832, 47182608056319,
+STORE, 47182608367616, 47182608371711,
+STORE, 47182608056320, 47182608367615,
+ERASE, 47182608056320, 47182608056320,
+STORE, 47182608056320, 47182608367615,
+STORE, 47182608396288, 47182608412671,
+STORE, 47182608371712, 47182608396287,
+ERASE, 47182608371712, 47182608371712,
+STORE, 47182608371712, 47182608396287,
+ERASE, 47182608396288, 47182608396288,
+STORE, 47182608396288, 47182608412671,
+STORE, 47182608412672, 47182608523263,
+STORE, 47182608429056, 47182608523263,
+STORE, 47182608412672, 47182608429055,
+ERASE, 47182608429056, 47182608429056,
+STORE, 47182608429056, 47182608515071,
+STORE, 47182608515072, 47182608523263,
+STORE, 47182608490496, 47182608515071,
+STORE, 47182608429056, 47182608490495,
+ERASE, 47182608429056, 47182608429056,
+STORE, 47182608429056, 47182608490495,
+STORE, 47182608510976, 47182608515071,
+STORE, 47182608490496, 47182608510975,
+ERASE, 47182608490496, 47182608490496,
+STORE, 47182608490496, 47182608510975,
+ERASE, 47182608515072, 47182608515072,
+STORE, 47182608515072, 47182608523263,
+STORE, 47182608523264, 47182608568319,
+ERASE, 47182608523264, 47182608523264,
+STORE, 47182608523264, 47182608531455,
+STORE, 47182608531456, 47182608568319,
+STORE, 47182608551936, 47182608568319,
+STORE, 47182608531456, 47182608551935,
+ERASE, 47182608531456, 47182608531456,
+STORE, 47182608531456, 47182608551935,
+STORE, 47182608560128, 47182608568319,
+STORE, 47182608551936, 47182608560127,
+ERASE, 47182608551936, 47182608551936,
+STORE, 47182608551936, 47182608568319,
+ERASE, 47182608551936, 47182608551936,
+STORE, 47182608551936, 47182608560127,
+STORE, 47182608560128, 47182608568319,
+ERASE, 47182608560128, 47182608560128,
+STORE, 47182608560128, 47182608568319,
+STORE, 47182608568320, 47182608916479,
+STORE, 47182608609280, 47182608916479,
+STORE, 47182608568320, 47182608609279,
+ERASE, 47182608609280, 47182608609280,
+STORE, 47182608609280, 47182608891903,
+STORE, 47182608891904, 47182608916479,
+STORE, 47182608822272, 47182608891903,
+STORE, 47182608609280, 47182608822271,
+ERASE, 47182608609280, 47182608609280,
+STORE, 47182608609280, 47182608822271,
+STORE, 47182608887808, 47182608891903,
+STORE, 47182608822272, 47182608887807,
+ERASE, 47182608822272, 47182608822272,
+STORE, 47182608822272, 47182608887807,
+ERASE, 47182608891904, 47182608891904,
+STORE, 47182608891904, 47182608916479,
+STORE, 47182608916480, 47182611177471,
+STORE, 47182609068032, 47182611177471,
+STORE, 47182608916480, 47182609068031,
+ERASE, 47182609068032, 47182609068032,
+STORE, 47182609068032, 47182611161087,
+STORE, 47182611161088, 47182611177471,
+STORE, 47182611169280, 47182611177471,
+STORE, 47182611161088, 47182611169279,
+ERASE, 47182611161088, 47182611161088,
+STORE, 47182611161088, 47182611169279,
+ERASE, 47182611169280, 47182611169280,
+STORE, 47182611169280, 47182611177471,
+STORE, 47182611177472, 47182611312639,
+ERASE, 47182611177472, 47182611177472,
+STORE, 47182611177472, 47182611202047,
+STORE, 47182611202048, 47182611312639,
+STORE, 47182611263488, 47182611312639,
+STORE, 47182611202048, 47182611263487,
+ERASE, 47182611202048, 47182611202048,
+STORE, 47182611202048, 47182611263487,
+STORE, 47182611288064, 47182611312639,
+STORE, 47182611263488, 47182611288063,
+ERASE, 47182611263488, 47182611263488,
+STORE, 47182611263488, 47182611312639,
+ERASE, 47182611263488, 47182611263488,
+STORE, 47182611263488, 47182611288063,
+STORE, 47182611288064, 47182611312639,
+STORE, 47182611296256, 47182611312639,
+STORE, 47182611288064, 47182611296255,
+ERASE, 47182611288064, 47182611288064,
+STORE, 47182611288064, 47182611296255,
+ERASE, 47182611296256, 47182611296256,
+STORE, 47182611296256, 47182611312639,
+STORE, 47182611296256, 47182611320831,
+STORE, 47182611320832, 47182611484671,
+ERASE, 47182611320832, 47182611320832,
+STORE, 47182611320832, 47182611333119,
+STORE, 47182611333120, 47182611484671,
+STORE, 47182611431424, 47182611484671,
+STORE, 47182611333120, 47182611431423,
+ERASE, 47182611333120, 47182611333120,
+STORE, 47182611333120, 47182611431423,
+STORE, 47182611476480, 47182611484671,
+STORE, 47182611431424, 47182611476479,
+ERASE, 47182611431424, 47182611431424,
+STORE, 47182611431424, 47182611484671,
+ERASE, 47182611431424, 47182611431424,
+STORE, 47182611431424, 47182611476479,
+STORE, 47182611476480, 47182611484671,
+ERASE, 47182611476480, 47182611476480,
+STORE, 47182611476480, 47182611484671,
+STORE, 47182611484672, 47182612082687,
+STORE, 47182611603456, 47182612082687,
+STORE, 47182611484672, 47182611603455,
+ERASE, 47182611603456, 47182611603456,
+STORE, 47182611603456, 47182612029439,
+STORE, 47182612029440, 47182612082687,
+STORE, 47182611918848, 47182612029439,
+STORE, 47182611603456, 47182611918847,
+ERASE, 47182611603456, 47182611603456,
+STORE, 47182611603456, 47182611918847,
+STORE, 47182612025344, 47182612029439,
+STORE, 47182611918848, 47182612025343,
+ERASE, 47182611918848, 47182611918848,
+STORE, 47182611918848, 47182612025343,
+ERASE, 47182612029440, 47182612029440,
+STORE, 47182612029440, 47182612082687,
+STORE, 47182612082688, 47182615134207,
+STORE, 47182612627456, 47182615134207,
+STORE, 47182612082688, 47182612627455,
+ERASE, 47182612627456, 47182612627456,
+STORE, 47182612627456, 47182614913023,
+STORE, 47182614913024, 47182615134207,
+STORE, 47182614323200, 47182614913023,
+STORE, 47182612627456, 47182614323199,
+ERASE, 47182612627456, 47182612627456,
+STORE, 47182612627456, 47182614323199,
+STORE, 47182614908928, 47182614913023,
+STORE, 47182614323200, 47182614908927,
+ERASE, 47182614323200, 47182614323200,
+STORE, 47182614323200, 47182614908927,
+STORE, 47182615117824, 47182615134207,
+STORE, 47182614913024, 47182615117823,
+ERASE, 47182614913024, 47182614913024,
+STORE, 47182614913024, 47182615117823,
+ERASE, 47182615117824, 47182615117824,
+STORE, 47182615117824, 47182615134207,
+STORE, 47182615134208, 47182615166975,
+ERASE, 47182615134208, 47182615134208,
+STORE, 47182615134208, 47182615142399,
+STORE, 47182615142400, 47182615166975,
+STORE, 47182615154688, 47182615166975,
+STORE, 47182615142400, 47182615154687,
+ERASE, 47182615142400, 47182615142400,
+STORE, 47182615142400, 47182615154687,
+STORE, 47182615158784, 47182615166975,
+STORE, 47182615154688, 47182615158783,
+ERASE, 47182615154688, 47182615154688,
+STORE, 47182615154688, 47182615166975,
+ERASE, 47182615154688, 47182615154688,
+STORE, 47182615154688, 47182615158783,
+STORE, 47182615158784, 47182615166975,
+ERASE, 47182615158784, 47182615158784,
+STORE, 47182615158784, 47182615166975,
+STORE, 47182615166976, 47182615203839,
+ERASE, 47182615166976, 47182615166976,
+STORE, 47182615166976, 47182615175167,
+STORE, 47182615175168, 47182615203839,
+STORE, 47182615191552, 47182615203839,
+STORE, 47182615175168, 47182615191551,
+ERASE, 47182615175168, 47182615175168,
+STORE, 47182615175168, 47182615191551,
+STORE, 47182615195648, 47182615203839,
+STORE, 47182615191552, 47182615195647,
+ERASE, 47182615191552, 47182615191552,
+STORE, 47182615191552, 47182615203839,
+ERASE, 47182615191552, 47182615191552,
+STORE, 47182615191552, 47182615195647,
+STORE, 47182615195648, 47182615203839,
+ERASE, 47182615195648, 47182615195648,
+STORE, 47182615195648, 47182615203839,
+STORE, 47182615203840, 47182615678975,
+ERASE, 47182615203840, 47182615203840,
+STORE, 47182615203840, 47182615212031,
+STORE, 47182615212032, 47182615678975,
+STORE, 47182615547904, 47182615678975,
+STORE, 47182615212032, 47182615547903,
+ERASE, 47182615212032, 47182615212032,
+STORE, 47182615212032, 47182615547903,
+STORE, 47182615670784, 47182615678975,
+STORE, 47182615547904, 47182615670783,
+ERASE, 47182615547904, 47182615547904,
+STORE, 47182615547904, 47182615678975,
+ERASE, 47182615547904, 47182615547904,
+STORE, 47182615547904, 47182615670783,
+STORE, 47182615670784, 47182615678975,
+ERASE, 47182615670784, 47182615670784,
+STORE, 47182615670784, 47182615678975,
+STORE, 47182615678976, 47182615687167,
+STORE, 47182615687168, 47182615707647,
+ERASE, 47182615687168, 47182615687168,
+STORE, 47182615687168, 47182615691263,
+STORE, 47182615691264, 47182615707647,
+STORE, 47182615695360, 47182615707647,
+STORE, 47182615691264, 47182615695359,
+ERASE, 47182615691264, 47182615691264,
+STORE, 47182615691264, 47182615695359,
+STORE, 47182615699456, 47182615707647,
+STORE, 47182615695360, 47182615699455,
+ERASE, 47182615695360, 47182615695360,
+STORE, 47182615695360, 47182615707647,
+ERASE, 47182615695360, 47182615695360,
+STORE, 47182615695360, 47182615699455,
+STORE, 47182615699456, 47182615707647,
+ERASE, 47182615699456, 47182615699456,
+STORE, 47182615699456, 47182615707647,
+STORE, 47182615707648, 47182615715839,
+ERASE, 47182608371712, 47182608371712,
+STORE, 47182608371712, 47182608388095,
+STORE, 47182608388096, 47182608396287,
+ERASE, 47182615699456, 47182615699456,
+STORE, 47182615699456, 47182615703551,
+STORE, 47182615703552, 47182615707647,
+ERASE, 47182611288064, 47182611288064,
+STORE, 47182611288064, 47182611292159,
+STORE, 47182611292160, 47182611296255,
+ERASE, 47182615670784, 47182615670784,
+STORE, 47182615670784, 47182615674879,
+STORE, 47182615674880, 47182615678975,
+ERASE, 47182615195648, 47182615195648,
+STORE, 47182615195648, 47182615199743,
+STORE, 47182615199744, 47182615203839,
+ERASE, 47182615158784, 47182615158784,
+STORE, 47182615158784, 47182615162879,
+STORE, 47182615162880, 47182615166975,
+ERASE, 47182614913024, 47182614913024,
+STORE, 47182614913024, 47182615109631,
+STORE, 47182615109632, 47182615117823,
+ERASE, 47182612029440, 47182612029440,
+STORE, 47182612029440, 47182612066303,
+STORE, 47182612066304, 47182612082687,
+ERASE, 47182611476480, 47182611476480,
+STORE, 47182611476480, 47182611480575,
+STORE, 47182611480576, 47182611484671,
+ERASE, 47182611161088, 47182611161088,
+STORE, 47182611161088, 47182611165183,
+STORE, 47182611165184, 47182611169279,
+ERASE, 47182608891904, 47182608891904,
+STORE, 47182608891904, 47182608912383,
+STORE, 47182608912384, 47182608916479,
+ERASE, 47182608560128, 47182608560128,
+STORE, 47182608560128, 47182608564223,
+STORE, 47182608564224, 47182608568319,
+ERASE, 47182608515072, 47182608515072,
+STORE, 47182608515072, 47182608519167,
+STORE, 47182608519168, 47182608523263,
+ERASE, 93963664379904, 93963664379904,
+STORE, 93963664379904, 93963664502783,
+STORE, 93963664502784, 93963664506879,
+ERASE, 140450188599296, 140450188599296,
+STORE, 140450188599296, 140450188603391,
+STORE, 140450188603392, 140450188607487,
+ERASE, 47182606557184, 47182606557184,
+STORE, 93963694723072, 93963694858239,
+STORE, 140737488347136, 140737488351231,
+STORE, 140730313261056, 140737488351231,
+ERASE, 140730313261056, 140730313261056,
+STORE, 140730313261056, 140730313265151,
+STORE, 94386579017728, 94386579697663,
+ERASE, 94386579017728, 94386579017728,
+STORE, 94386579017728, 94386579083263,
+STORE, 94386579083264, 94386579697663,
+ERASE, 94386579083264, 94386579083264,
+STORE, 94386579083264, 94386579431423,
+STORE, 94386579431424, 94386579570687,
+STORE, 94386579570688, 94386579697663,
+STORE, 140124810838016, 140124811010047,
+ERASE, 140124810838016, 140124810838016,
+STORE, 140124810838016, 140124810842111,
+STORE, 140124810842112, 140124811010047,
+ERASE, 140124810842112, 140124810842112,
+STORE, 140124810842112, 140124810964991,
+STORE, 140124810964992, 140124810997759,
+STORE, 140124810997760, 140124811005951,
+STORE, 140124811005952, 140124811010047,
+STORE, 140730313601024, 140730313605119,
+STORE, 140730313588736, 140730313601023,
+STORE, 47507984158720, 47507984166911,
+STORE, 47507984166912, 47507984175103,
+STORE, 47507984175104, 47507986014207,
+STORE, 47507984314368, 47507986014207,
+STORE, 47507984175104, 47507984314367,
+ERASE, 47507984314368, 47507984314368,
+STORE, 47507984314368, 47507985973247,
+STORE, 47507985973248, 47507986014207,
+STORE, 47507985657856, 47507985973247,
+STORE, 47507984314368, 47507985657855,
+ERASE, 47507984314368, 47507984314368,
+STORE, 47507984314368, 47507985657855,
+STORE, 47507985969152, 47507985973247,
+STORE, 47507985657856, 47507985969151,
+ERASE, 47507985657856, 47507985657856,
+STORE, 47507985657856, 47507985969151,
+STORE, 47507985997824, 47507986014207,
+STORE, 47507985973248, 47507985997823,
+ERASE, 47507985973248, 47507985973248,
+STORE, 47507985973248, 47507985997823,
+ERASE, 47507985997824, 47507985997824,
+STORE, 47507985997824, 47507986014207,
+STORE, 47507986014208, 47507986124799,
+STORE, 47507986030592, 47507986124799,
+STORE, 47507986014208, 47507986030591,
+ERASE, 47507986030592, 47507986030592,
+STORE, 47507986030592, 47507986116607,
+STORE, 47507986116608, 47507986124799,
+STORE, 47507986092032, 47507986116607,
+STORE, 47507986030592, 47507986092031,
+ERASE, 47507986030592, 47507986030592,
+STORE, 47507986030592, 47507986092031,
+STORE, 47507986112512, 47507986116607,
+STORE, 47507986092032, 47507986112511,
+ERASE, 47507986092032, 47507986092032,
+STORE, 47507986092032, 47507986112511,
+ERASE, 47507986116608, 47507986116608,
+STORE, 47507986116608, 47507986124799,
+STORE, 47507986124800, 47507986169855,
+ERASE, 47507986124800, 47507986124800,
+STORE, 47507986124800, 47507986132991,
+STORE, 47507986132992, 47507986169855,
+STORE, 47507986153472, 47507986169855,
+STORE, 47507986132992, 47507986153471,
+ERASE, 47507986132992, 47507986132992,
+STORE, 47507986132992, 47507986153471,
+STORE, 47507986161664, 47507986169855,
+STORE, 47507986153472, 47507986161663,
+ERASE, 47507986153472, 47507986153472,
+STORE, 47507986153472, 47507986169855,
+ERASE, 47507986153472, 47507986153472,
+STORE, 47507986153472, 47507986161663,
+STORE, 47507986161664, 47507986169855,
+ERASE, 47507986161664, 47507986161664,
+STORE, 47507986161664, 47507986169855,
+STORE, 47507986169856, 47507986518015,
+STORE, 47507986210816, 47507986518015,
+STORE, 47507986169856, 47507986210815,
+ERASE, 47507986210816, 47507986210816,
+STORE, 47507986210816, 47507986493439,
+STORE, 47507986493440, 47507986518015,
+STORE, 47507986423808, 47507986493439,
+STORE, 47507986210816, 47507986423807,
+ERASE, 47507986210816, 47507986210816,
+STORE, 47507986210816, 47507986423807,
+STORE, 47507986489344, 47507986493439,
+STORE, 47507986423808, 47507986489343,
+ERASE, 47507986423808, 47507986423808,
+STORE, 47507986423808, 47507986489343,
+ERASE, 47507986493440, 47507986493440,
+STORE, 47507986493440, 47507986518015,
+STORE, 47507986518016, 47507988779007,
+STORE, 47507986669568, 47507988779007,
+STORE, 47507986518016, 47507986669567,
+ERASE, 47507986669568, 47507986669568,
+STORE, 47507986669568, 47507988762623,
+STORE, 47507988762624, 47507988779007,
+STORE, 47507988770816, 47507988779007,
+STORE, 47507988762624, 47507988770815,
+ERASE, 47507988762624, 47507988762624,
+STORE, 47507988762624, 47507988770815,
+ERASE, 47507988770816, 47507988770816,
+STORE, 47507988770816, 47507988779007,
+STORE, 47507988779008, 47507988914175,
+ERASE, 47507988779008, 47507988779008,
+STORE, 47507988779008, 47507988803583,
+STORE, 47507988803584, 47507988914175,
+STORE, 47507988865024, 47507988914175,
+STORE, 47507988803584, 47507988865023,
+ERASE, 47507988803584, 47507988803584,
+STORE, 47507988803584, 47507988865023,
+STORE, 47507988889600, 47507988914175,
+STORE, 47507988865024, 47507988889599,
+ERASE, 47507988865024, 47507988865024,
+STORE, 47507988865024, 47507988914175,
+ERASE, 47507988865024, 47507988865024,
+STORE, 47507988865024, 47507988889599,
+STORE, 47507988889600, 47507988914175,
+STORE, 47507988897792, 47507988914175,
+STORE, 47507988889600, 47507988897791,
+ERASE, 47507988889600, 47507988889600,
+STORE, 47507988889600, 47507988897791,
+ERASE, 47507988897792, 47507988897792,
+STORE, 47507988897792, 47507988914175,
+STORE, 47507988897792, 47507988922367,
+STORE, 47507988922368, 47507989086207,
+ERASE, 47507988922368, 47507988922368,
+STORE, 47507988922368, 47507988934655,
+STORE, 47507988934656, 47507989086207,
+STORE, 47507989032960, 47507989086207,
+STORE, 47507988934656, 47507989032959,
+ERASE, 47507988934656, 47507988934656,
+STORE, 47507988934656, 47507989032959,
+STORE, 47507989078016, 47507989086207,
+STORE, 47507989032960, 47507989078015,
+ERASE, 47507989032960, 47507989032960,
+STORE, 47507989032960, 47507989086207,
+ERASE, 47507989032960, 47507989032960,
+STORE, 47507989032960, 47507989078015,
+STORE, 47507989078016, 47507989086207,
+ERASE, 47507989078016, 47507989078016,
+STORE, 47507989078016, 47507989086207,
+STORE, 47507989086208, 47507989684223,
+STORE, 47507989204992, 47507989684223,
+STORE, 47507989086208, 47507989204991,
+ERASE, 47507989204992, 47507989204992,
+STORE, 47507989204992, 47507989630975,
+STORE, 47507989630976, 47507989684223,
+STORE, 47507989520384, 47507989630975,
+STORE, 47507989204992, 47507989520383,
+ERASE, 47507989204992, 47507989204992,
+STORE, 47507989204992, 47507989520383,
+STORE, 47507989626880, 47507989630975,
+STORE, 47507989520384, 47507989626879,
+ERASE, 47507989520384, 47507989520384,
+STORE, 47507989520384, 47507989626879,
+ERASE, 47507989630976, 47507989630976,
+STORE, 47507989630976, 47507989684223,
+STORE, 47507989684224, 47507992735743,
+STORE, 47507990228992, 47507992735743,
+STORE, 47507989684224, 47507990228991,
+ERASE, 47507990228992, 47507990228992,
+STORE, 47507990228992, 47507992514559,
+STORE, 47507992514560, 47507992735743,
+STORE, 47507991924736, 47507992514559,
+STORE, 47507990228992, 47507991924735,
+ERASE, 47507990228992, 47507990228992,
+STORE, 47507990228992, 47507991924735,
+STORE, 47507992510464, 47507992514559,
+STORE, 47507991924736, 47507992510463,
+ERASE, 47507991924736, 47507991924736,
+STORE, 47507991924736, 47507992510463,
+STORE, 47507992719360, 47507992735743,
+STORE, 47507992514560, 47507992719359,
+ERASE, 47507992514560, 47507992514560,
+STORE, 47507992514560, 47507992719359,
+ERASE, 47507992719360, 47507992719360,
+STORE, 47507992719360, 47507992735743,
+STORE, 47507992735744, 47507992768511,
+ERASE, 47507992735744, 47507992735744,
+STORE, 47507992735744, 47507992743935,
+STORE, 47507992743936, 47507992768511,
+STORE, 47507992756224, 47507992768511,
+STORE, 47507992743936, 47507992756223,
+ERASE, 47507992743936, 47507992743936,
+STORE, 47507992743936, 47507992756223,
+STORE, 47507992760320, 47507992768511,
+STORE, 47507992756224, 47507992760319,
+ERASE, 47507992756224, 47507992756224,
+STORE, 47507992756224, 47507992768511,
+ERASE, 47507992756224, 47507992756224,
+STORE, 47507992756224, 47507992760319,
+STORE, 47507992760320, 47507992768511,
+ERASE, 47507992760320, 47507992760320,
+STORE, 47507992760320, 47507992768511,
+STORE, 47507992768512, 47507992805375,
+ERASE, 47507992768512, 47507992768512,
+STORE, 47507992768512, 47507992776703,
+STORE, 47507992776704, 47507992805375,
+STORE, 47507992793088, 47507992805375,
+STORE, 47507992776704, 47507992793087,
+ERASE, 47507992776704, 47507992776704,
+STORE, 47507992776704, 47507992793087,
+STORE, 47507992797184, 47507992805375,
+STORE, 47507992793088, 47507992797183,
+ERASE, 47507992793088, 47507992793088,
+STORE, 47507992793088, 47507992805375,
+ERASE, 47507992793088, 47507992793088,
+STORE, 47507992793088, 47507992797183,
+STORE, 47507992797184, 47507992805375,
+ERASE, 47507992797184, 47507992797184,
+STORE, 47507992797184, 47507992805375,
+STORE, 47507992805376, 47507993280511,
+ERASE, 47507992805376, 47507992805376,
+STORE, 47507992805376, 47507992813567,
+STORE, 47507992813568, 47507993280511,
+STORE, 47507993149440, 47507993280511,
+STORE, 47507992813568, 47507993149439,
+ERASE, 47507992813568, 47507992813568,
+STORE, 47507992813568, 47507993149439,
+STORE, 47507993272320, 47507993280511,
+STORE, 47507993149440, 47507993272319,
+ERASE, 47507993149440, 47507993149440,
+STORE, 47507993149440, 47507993280511,
+ERASE, 47507993149440, 47507993149440,
+STORE, 47507993149440, 47507993272319,
+STORE, 47507993272320, 47507993280511,
+ERASE, 47507993272320, 47507993272320,
+STORE, 47507993272320, 47507993280511,
+STORE, 47507993280512, 47507993288703,
+STORE, 47507993288704, 47507993309183,
+ERASE, 47507993288704, 47507993288704,
+STORE, 47507993288704, 47507993292799,
+STORE, 47507993292800, 47507993309183,
+STORE, 47507993296896, 47507993309183,
+STORE, 47507993292800, 47507993296895,
+ERASE, 47507993292800, 47507993292800,
+STORE, 47507993292800, 47507993296895,
+STORE, 47507993300992, 47507993309183,
+STORE, 47507993296896, 47507993300991,
+ERASE, 47507993296896, 47507993296896,
+STORE, 47507993296896, 47507993309183,
+ERASE, 47507993296896, 47507993296896,
+STORE, 47507993296896, 47507993300991,
+STORE, 47507993300992, 47507993309183,
+ERASE, 47507993300992, 47507993300992,
+STORE, 47507993300992, 47507993309183,
+STORE, 47507993309184, 47507993317375,
+ERASE, 47507985973248, 47507985973248,
+STORE, 47507985973248, 47507985989631,
+STORE, 47507985989632, 47507985997823,
+ERASE, 47507993300992, 47507993300992,
+STORE, 47507993300992, 47507993305087,
+STORE, 47507993305088, 47507993309183,
+ERASE, 47507988889600, 47507988889600,
+STORE, 47507988889600, 47507988893695,
+STORE, 47507988893696, 47507988897791,
+ERASE, 47507993272320, 47507993272320,
+STORE, 47507993272320, 47507993276415,
+STORE, 47507993276416, 47507993280511,
+ERASE, 47507992797184, 47507992797184,
+STORE, 47507992797184, 47507992801279,
+STORE, 47507992801280, 47507992805375,
+ERASE, 47507992760320, 47507992760320,
+STORE, 47507992760320, 47507992764415,
+STORE, 47507992764416, 47507992768511,
+ERASE, 47507992514560, 47507992514560,
+STORE, 47507992514560, 47507992711167,
+STORE, 47507992711168, 47507992719359,
+ERASE, 47507989630976, 47507989630976,
+STORE, 47507989630976, 47507989667839,
+STORE, 47507989667840, 47507989684223,
+ERASE, 47507989078016, 47507989078016,
+STORE, 47507989078016, 47507989082111,
+STORE, 47507989082112, 47507989086207,
+ERASE, 47507988762624, 47507988762624,
+STORE, 47507988762624, 47507988766719,
+STORE, 47507988766720, 47507988770815,
+ERASE, 47507986493440, 47507986493440,
+STORE, 47507986493440, 47507986513919,
+STORE, 47507986513920, 47507986518015,
+ERASE, 47507986161664, 47507986161664,
+STORE, 47507986161664, 47507986165759,
+STORE, 47507986165760, 47507986169855,
+ERASE, 47507986116608, 47507986116608,
+STORE, 47507986116608, 47507986120703,
+STORE, 47507986120704, 47507986124799,
+ERASE, 94386579570688, 94386579570688,
+STORE, 94386579570688, 94386579693567,
+STORE, 94386579693568, 94386579697663,
+ERASE, 140124810997760, 140124810997760,
+STORE, 140124810997760, 140124811001855,
+STORE, 140124811001856, 140124811005951,
+ERASE, 47507984158720, 47507984158720,
+STORE, 94386583982080, 94386584117247,
+STORE, 94386583982080, 94386584256511,
+ERASE, 94386583982080, 94386583982080,
+STORE, 94386583982080, 94386584223743,
+STORE, 94386584223744, 94386584256511,
+ERASE, 94386584223744, 94386584223744,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733763395584, 140737488351231,
+ERASE, 140733763395584, 140733763395584,
+STORE, 140733763395584, 140733763399679,
+STORE, 94011546472448, 94011547152383,
+ERASE, 94011546472448, 94011546472448,
+STORE, 94011546472448, 94011546537983,
+STORE, 94011546537984, 94011547152383,
+ERASE, 94011546537984, 94011546537984,
+STORE, 94011546537984, 94011546886143,
+STORE, 94011546886144, 94011547025407,
+STORE, 94011547025408, 94011547152383,
+STORE, 139757597949952, 139757598121983,
+ERASE, 139757597949952, 139757597949952,
+STORE, 139757597949952, 139757597954047,
+STORE, 139757597954048, 139757598121983,
+ERASE, 139757597954048, 139757597954048,
+STORE, 139757597954048, 139757598076927,
+STORE, 139757598076928, 139757598109695,
+STORE, 139757598109696, 139757598117887,
+STORE, 139757598117888, 139757598121983,
+STORE, 140733763596288, 140733763600383,
+STORE, 140733763584000, 140733763596287,
+STORE, 47875197046784, 47875197054975,
+STORE, 47875197054976, 47875197063167,
+STORE, 47875197063168, 47875198902271,
+STORE, 47875197202432, 47875198902271,
+STORE, 47875197063168, 47875197202431,
+ERASE, 47875197202432, 47875197202432,
+STORE, 47875197202432, 47875198861311,
+STORE, 47875198861312, 47875198902271,
+STORE, 47875198545920, 47875198861311,
+STORE, 47875197202432, 47875198545919,
+ERASE, 47875197202432, 47875197202432,
+STORE, 47875197202432, 47875198545919,
+STORE, 47875198857216, 47875198861311,
+STORE, 47875198545920, 47875198857215,
+ERASE, 47875198545920, 47875198545920,
+STORE, 47875198545920, 47875198857215,
+STORE, 47875198885888, 47875198902271,
+STORE, 47875198861312, 47875198885887,
+ERASE, 47875198861312, 47875198861312,
+STORE, 47875198861312, 47875198885887,
+ERASE, 47875198885888, 47875198885888,
+STORE, 47875198885888, 47875198902271,
+STORE, 47875198902272, 47875199012863,
+STORE, 47875198918656, 47875199012863,
+STORE, 47875198902272, 47875198918655,
+ERASE, 47875198918656, 47875198918656,
+STORE, 47875198918656, 47875199004671,
+STORE, 47875199004672, 47875199012863,
+STORE, 47875198980096, 47875199004671,
+STORE, 47875198918656, 47875198980095,
+ERASE, 47875198918656, 47875198918656,
+STORE, 47875198918656, 47875198980095,
+STORE, 47875199000576, 47875199004671,
+STORE, 47875198980096, 47875199000575,
+ERASE, 47875198980096, 47875198980096,
+STORE, 47875198980096, 47875199000575,
+ERASE, 47875199004672, 47875199004672,
+STORE, 47875199004672, 47875199012863,
+STORE, 47875199012864, 47875199057919,
+ERASE, 47875199012864, 47875199012864,
+STORE, 47875199012864, 47875199021055,
+STORE, 47875199021056, 47875199057919,
+STORE, 47875199041536, 47875199057919,
+STORE, 47875199021056, 47875199041535,
+ERASE, 47875199021056, 47875199021056,
+STORE, 47875199021056, 47875199041535,
+STORE, 47875199049728, 47875199057919,
+STORE, 47875199041536, 47875199049727,
+ERASE, 47875199041536, 47875199041536,
+STORE, 47875199041536, 47875199057919,
+ERASE, 47875199041536, 47875199041536,
+STORE, 47875199041536, 47875199049727,
+STORE, 47875199049728, 47875199057919,
+ERASE, 47875199049728, 47875199049728,
+STORE, 47875199049728, 47875199057919,
+STORE, 47875199057920, 47875199406079,
+STORE, 47875199098880, 47875199406079,
+STORE, 47875199057920, 47875199098879,
+ERASE, 47875199098880, 47875199098880,
+STORE, 47875199098880, 47875199381503,
+STORE, 47875199381504, 47875199406079,
+STORE, 47875199311872, 47875199381503,
+STORE, 47875199098880, 47875199311871,
+ERASE, 47875199098880, 47875199098880,
+STORE, 47875199098880, 47875199311871,
+STORE, 47875199377408, 47875199381503,
+STORE, 47875199311872, 47875199377407,
+ERASE, 47875199311872, 47875199311872,
+STORE, 47875199311872, 47875199377407,
+ERASE, 47875199381504, 47875199381504,
+STORE, 47875199381504, 47875199406079,
+STORE, 47875199406080, 47875201667071,
+STORE, 47875199557632, 47875201667071,
+STORE, 47875199406080, 47875199557631,
+ERASE, 47875199557632, 47875199557632,
+STORE, 47875199557632, 47875201650687,
+STORE, 47875201650688, 47875201667071,
+STORE, 47875201658880, 47875201667071,
+STORE, 47875201650688, 47875201658879,
+ERASE, 47875201650688, 47875201650688,
+STORE, 47875201650688, 47875201658879,
+ERASE, 47875201658880, 47875201658880,
+STORE, 47875201658880, 47875201667071,
+STORE, 47875201667072, 47875201802239,
+ERASE, 47875201667072, 47875201667072,
+STORE, 47875201667072, 47875201691647,
+STORE, 47875201691648, 47875201802239,
+STORE, 47875201753088, 47875201802239,
+STORE, 47875201691648, 47875201753087,
+ERASE, 47875201691648, 47875201691648,
+STORE, 47875201691648, 47875201753087,
+STORE, 47875201777664, 47875201802239,
+STORE, 47875201753088, 47875201777663,
+ERASE, 47875201753088, 47875201753088,
+STORE, 47875201753088, 47875201802239,
+ERASE, 47875201753088, 47875201753088,
+STORE, 47875201753088, 47875201777663,
+STORE, 47875201777664, 47875201802239,
+STORE, 47875201785856, 47875201802239,
+STORE, 47875201777664, 47875201785855,
+ERASE, 47875201777664, 47875201777664,
+STORE, 47875201777664, 47875201785855,
+ERASE, 47875201785856, 47875201785856,
+STORE, 47875201785856, 47875201802239,
+STORE, 47875201785856, 47875201810431,
+STORE, 47875201810432, 47875201974271,
+ERASE, 47875201810432, 47875201810432,
+STORE, 47875201810432, 47875201822719,
+STORE, 47875201822720, 47875201974271,
+STORE, 47875201921024, 47875201974271,
+STORE, 47875201822720, 47875201921023,
+ERASE, 47875201822720, 47875201822720,
+STORE, 47875201822720, 47875201921023,
+STORE, 47875201966080, 47875201974271,
+STORE, 47875201921024, 47875201966079,
+ERASE, 47875201921024, 47875201921024,
+STORE, 47875201921024, 47875201974271,
+ERASE, 47875201921024, 47875201921024,
+STORE, 47875201921024, 47875201966079,
+STORE, 47875201966080, 47875201974271,
+ERASE, 47875201966080, 47875201966080,
+STORE, 47875201966080, 47875201974271,
+STORE, 47875201974272, 47875202572287,
+STORE, 47875202093056, 47875202572287,
+STORE, 47875201974272, 47875202093055,
+ERASE, 47875202093056, 47875202093056,
+STORE, 47875202093056, 47875202519039,
+STORE, 47875202519040, 47875202572287,
+STORE, 47875202408448, 47875202519039,
+STORE, 47875202093056, 47875202408447,
+ERASE, 47875202093056, 47875202093056,
+STORE, 47875202093056, 47875202408447,
+STORE, 47875202514944, 47875202519039,
+STORE, 47875202408448, 47875202514943,
+ERASE, 47875202408448, 47875202408448,
+STORE, 47875202408448, 47875202514943,
+ERASE, 47875202519040, 47875202519040,
+STORE, 47875202519040, 47875202572287,
+STORE, 47875202572288, 47875205623807,
+STORE, 47875203117056, 47875205623807,
+STORE, 47875202572288, 47875203117055,
+ERASE, 47875203117056, 47875203117056,
+STORE, 47875203117056, 47875205402623,
+STORE, 47875205402624, 47875205623807,
+STORE, 47875204812800, 47875205402623,
+STORE, 47875203117056, 47875204812799,
+ERASE, 47875203117056, 47875203117056,
+STORE, 47875203117056, 47875204812799,
+STORE, 47875205398528, 47875205402623,
+STORE, 47875204812800, 47875205398527,
+ERASE, 47875204812800, 47875204812800,
+STORE, 47875204812800, 47875205398527,
+STORE, 47875205607424, 47875205623807,
+STORE, 47875205402624, 47875205607423,
+ERASE, 47875205402624, 47875205402624,
+STORE, 47875205402624, 47875205607423,
+ERASE, 47875205607424, 47875205607424,
+STORE, 47875205607424, 47875205623807,
+STORE, 47875205623808, 47875205656575,
+ERASE, 47875205623808, 47875205623808,
+STORE, 47875205623808, 47875205631999,
+STORE, 47875205632000, 47875205656575,
+STORE, 47875205644288, 47875205656575,
+STORE, 47875205632000, 47875205644287,
+ERASE, 47875205632000, 47875205632000,
+STORE, 47875205632000, 47875205644287,
+STORE, 47875205648384, 47875205656575,
+STORE, 47875205644288, 47875205648383,
+ERASE, 47875205644288, 47875205644288,
+STORE, 47875205644288, 47875205656575,
+ERASE, 47875205644288, 47875205644288,
+STORE, 47875205644288, 47875205648383,
+STORE, 47875205648384, 47875205656575,
+ERASE, 47875205648384, 47875205648384,
+STORE, 47875205648384, 47875205656575,
+STORE, 47875205656576, 47875205693439,
+ERASE, 47875205656576, 47875205656576,
+STORE, 47875205656576, 47875205664767,
+STORE, 47875205664768, 47875205693439,
+STORE, 47875205681152, 47875205693439,
+STORE, 47875205664768, 47875205681151,
+ERASE, 47875205664768, 47875205664768,
+STORE, 47875205664768, 47875205681151,
+STORE, 47875205685248, 47875205693439,
+STORE, 47875205681152, 47875205685247,
+ERASE, 47875205681152, 47875205681152,
+STORE, 47875205681152, 47875205693439,
+ERASE, 47875205681152, 47875205681152,
+STORE, 47875205681152, 47875205685247,
+STORE, 47875205685248, 47875205693439,
+ERASE, 47875205685248, 47875205685248,
+STORE, 47875205685248, 47875205693439,
+STORE, 47875205693440, 47875206168575,
+ERASE, 47875205693440, 47875205693440,
+STORE, 47875205693440, 47875205701631,
+STORE, 47875205701632, 47875206168575,
+STORE, 47875206037504, 47875206168575,
+STORE, 47875205701632, 47875206037503,
+ERASE, 47875205701632, 47875205701632,
+STORE, 47875205701632, 47875206037503,
+STORE, 47875206160384, 47875206168575,
+STORE, 47875206037504, 47875206160383,
+ERASE, 47875206037504, 47875206037504,
+STORE, 47875206037504, 47875206168575,
+ERASE, 47875206037504, 47875206037504,
+STORE, 47875206037504, 47875206160383,
+STORE, 47875206160384, 47875206168575,
+ERASE, 47875206160384, 47875206160384,
+STORE, 47875206160384, 47875206168575,
+STORE, 47875206168576, 47875206176767,
+STORE, 47875206176768, 47875206197247,
+ERASE, 47875206176768, 47875206176768,
+STORE, 47875206176768, 47875206180863,
+STORE, 47875206180864, 47875206197247,
+STORE, 47875206184960, 47875206197247,
+STORE, 47875206180864, 47875206184959,
+ERASE, 47875206180864, 47875206180864,
+STORE, 47875206180864, 47875206184959,
+STORE, 47875206189056, 47875206197247,
+STORE, 47875206184960, 47875206189055,
+ERASE, 47875206184960, 47875206184960,
+STORE, 47875206184960, 47875206197247,
+ERASE, 47875206184960, 47875206184960,
+STORE, 47875206184960, 47875206189055,
+STORE, 47875206189056, 47875206197247,
+ERASE, 47875206189056, 47875206189056,
+STORE, 47875206189056, 47875206197247,
+STORE, 47875206197248, 47875206205439,
+ERASE, 47875198861312, 47875198861312,
+STORE, 47875198861312, 47875198877695,
+STORE, 47875198877696, 47875198885887,
+ERASE, 47875206189056, 47875206189056,
+STORE, 47875206189056, 47875206193151,
+STORE, 47875206193152, 47875206197247,
+ERASE, 47875201777664, 47875201777664,
+STORE, 47875201777664, 47875201781759,
+STORE, 47875201781760, 47875201785855,
+ERASE, 47875206160384, 47875206160384,
+STORE, 47875206160384, 47875206164479,
+STORE, 47875206164480, 47875206168575,
+ERASE, 47875205685248, 47875205685248,
+STORE, 47875205685248, 47875205689343,
+STORE, 47875205689344, 47875205693439,
+ERASE, 47875205648384, 47875205648384,
+STORE, 47875205648384, 47875205652479,
+STORE, 47875205652480, 47875205656575,
+ERASE, 47875205402624, 47875205402624,
+STORE, 47875205402624, 47875205599231,
+STORE, 47875205599232, 47875205607423,
+ERASE, 47875202519040, 47875202519040,
+STORE, 47875202519040, 47875202555903,
+STORE, 47875202555904, 47875202572287,
+ERASE, 47875201966080, 47875201966080,
+STORE, 47875201966080, 47875201970175,
+STORE, 47875201970176, 47875201974271,
+ERASE, 47875201650688, 47875201650688,
+STORE, 47875201650688, 47875201654783,
+STORE, 47875201654784, 47875201658879,
+ERASE, 47875199381504, 47875199381504,
+STORE, 47875199381504, 47875199401983,
+STORE, 47875199401984, 47875199406079,
+ERASE, 47875199049728, 47875199049728,
+STORE, 47875199049728, 47875199053823,
+STORE, 47875199053824, 47875199057919,
+ERASE, 47875199004672, 47875199004672,
+STORE, 47875199004672, 47875199008767,
+STORE, 47875199008768, 47875199012863,
+ERASE, 94011547025408, 94011547025408,
+STORE, 94011547025408, 94011547148287,
+STORE, 94011547148288, 94011547152383,
+ERASE, 139757598109696, 139757598109696,
+STORE, 139757598109696, 139757598113791,
+STORE, 139757598113792, 139757598117887,
+ERASE, 47875197046784, 47875197046784,
+STORE, 94011557584896, 94011557720063,
+STORE, 94011557584896, 94011557855231,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557851135,
+STORE, 94011557851136, 94011557855231,
+ERASE, 94011557851136, 94011557851136,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557847039,
+STORE, 94011557847040, 94011557851135,
+ERASE, 94011557847040, 94011557847040,
+STORE, 94011557584896, 94011557982207,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557978111,
+STORE, 94011557978112, 94011557982207,
+ERASE, 94011557978112, 94011557978112,
+ERASE, 94011557584896, 94011557584896,
+STORE, 94011557584896, 94011557974015,
+STORE, 94011557974016, 94011557978111,
+ERASE, 94011557974016, 94011557974016,
+STORE, 140737488347136, 140737488351231,
+STORE, 140734130360320, 140737488351231,
+ERASE, 140734130360320, 140734130360320,
+STORE, 140734130360320, 140734130364415,
+STORE, 94641232105472, 94641232785407,
+ERASE, 94641232105472, 94641232105472,
+STORE, 94641232105472, 94641232171007,
+STORE, 94641232171008, 94641232785407,
+ERASE, 94641232171008, 94641232171008,
+STORE, 94641232171008, 94641232519167,
+STORE, 94641232519168, 94641232658431,
+STORE, 94641232658432, 94641232785407,
+STORE, 139726599516160, 139726599688191,
+ERASE, 139726599516160, 139726599516160,
+STORE, 139726599516160, 139726599520255,
+STORE, 139726599520256, 139726599688191,
+ERASE, 139726599520256, 139726599520256,
+STORE, 139726599520256, 139726599643135,
+STORE, 139726599643136, 139726599675903,
+STORE, 139726599675904, 139726599684095,
+STORE, 139726599684096, 139726599688191,
+STORE, 140734130446336, 140734130450431,
+STORE, 140734130434048, 140734130446335,
+STORE, 47906195480576, 47906195488767,
+STORE, 47906195488768, 47906195496959,
+STORE, 47906195496960, 47906197336063,
+STORE, 47906195636224, 47906197336063,
+STORE, 47906195496960, 47906195636223,
+ERASE, 47906195636224, 47906195636224,
+STORE, 47906195636224, 47906197295103,
+STORE, 47906197295104, 47906197336063,
+STORE, 47906196979712, 47906197295103,
+STORE, 47906195636224, 47906196979711,
+ERASE, 47906195636224, 47906195636224,
+STORE, 47906195636224, 47906196979711,
+STORE, 47906197291008, 47906197295103,
+STORE, 47906196979712, 47906197291007,
+ERASE, 47906196979712, 47906196979712,
+STORE, 47906196979712, 47906197291007,
+STORE, 47906197319680, 47906197336063,
+STORE, 47906197295104, 47906197319679,
+ERASE, 47906197295104, 47906197295104,
+STORE, 47906197295104, 47906197319679,
+ERASE, 47906197319680, 47906197319680,
+STORE, 47906197319680, 47906197336063,
+STORE, 47906197336064, 47906197446655,
+STORE, 47906197352448, 47906197446655,
+STORE, 47906197336064, 47906197352447,
+ERASE, 47906197352448, 47906197352448,
+STORE, 47906197352448, 47906197438463,
+STORE, 47906197438464, 47906197446655,
+STORE, 47906197413888, 47906197438463,
+STORE, 47906197352448, 47906197413887,
+ERASE, 47906197352448, 47906197352448,
+STORE, 47906197352448, 47906197413887,
+STORE, 47906197434368, 47906197438463,
+STORE, 47906197413888, 47906197434367,
+ERASE, 47906197413888, 47906197413888,
+STORE, 47906197413888, 47906197434367,
+ERASE, 47906197438464, 47906197438464,
+STORE, 47906197438464, 47906197446655,
+STORE, 47906197446656, 47906197491711,
+ERASE, 47906197446656, 47906197446656,
+STORE, 47906197446656, 47906197454847,
+STORE, 47906197454848, 47906197491711,
+STORE, 47906197475328, 47906197491711,
+STORE, 47906197454848, 47906197475327,
+ERASE, 47906197454848, 47906197454848,
+STORE, 47906197454848, 47906197475327,
+STORE, 47906197483520, 47906197491711,
+STORE, 47906197475328, 47906197483519,
+ERASE, 47906197475328, 47906197475328,
+STORE, 47906197475328, 47906197491711,
+ERASE, 47906197475328, 47906197475328,
+STORE, 47906197475328, 47906197483519,
+STORE, 47906197483520, 47906197491711,
+ERASE, 47906197483520, 47906197483520,
+STORE, 47906197483520, 47906197491711,
+STORE, 47906197491712, 47906197839871,
+STORE, 47906197532672, 47906197839871,
+STORE, 47906197491712, 47906197532671,
+ERASE, 47906197532672, 47906197532672,
+STORE, 47906197532672, 47906197815295,
+STORE, 47906197815296, 47906197839871,
+STORE, 47906197745664, 47906197815295,
+STORE, 47906197532672, 47906197745663,
+ERASE, 47906197532672, 47906197532672,
+STORE, 47906197532672, 47906197745663,
+STORE, 47906197811200, 47906197815295,
+STORE, 47906197745664, 47906197811199,
+ERASE, 47906197745664, 47906197745664,
+STORE, 47906197745664, 47906197811199,
+ERASE, 47906197815296, 47906197815296,
+STORE, 47906197815296, 47906197839871,
+STORE, 47906197839872, 47906200100863,
+STORE, 47906197991424, 47906200100863,
+STORE, 47906197839872, 47906197991423,
+ERASE, 47906197991424, 47906197991424,
+STORE, 47906197991424, 47906200084479,
+STORE, 47906200084480, 47906200100863,
+STORE, 47906200092672, 47906200100863,
+STORE, 47906200084480, 47906200092671,
+ERASE, 47906200084480, 47906200084480,
+STORE, 47906200084480, 47906200092671,
+ERASE, 47906200092672, 47906200092672,
+STORE, 47906200092672, 47906200100863,
+STORE, 47906200100864, 47906200236031,
+ERASE, 47906200100864, 47906200100864,
+STORE, 47906200100864, 47906200125439,
+STORE, 47906200125440, 47906200236031,
+STORE, 47906200186880, 47906200236031,
+STORE, 47906200125440, 47906200186879,
+ERASE, 47906200125440, 47906200125440,
+STORE, 47906200125440, 47906200186879,
+STORE, 47906200211456, 47906200236031,
+STORE, 47906200186880, 47906200211455,
+ERASE, 47906200186880, 47906200186880,
+STORE, 47906200186880, 47906200236031,
+ERASE, 47906200186880, 47906200186880,
+STORE, 47906200186880, 47906200211455,
+STORE, 47906200211456, 47906200236031,
+STORE, 47906200219648, 47906200236031,
+STORE, 47906200211456, 47906200219647,
+ERASE, 47906200211456, 47906200211456,
+STORE, 47906200211456, 47906200219647,
+ERASE, 47906200219648, 47906200219648,
+STORE, 47906200219648, 47906200236031,
+STORE, 47906200219648, 47906200244223,
+STORE, 47906200244224, 47906200408063,
+ERASE, 47906200244224, 47906200244224,
+STORE, 47906200244224, 47906200256511,
+STORE, 47906200256512, 47906200408063,
+STORE, 47906200354816, 47906200408063,
+STORE, 47906200256512, 47906200354815,
+ERASE, 47906200256512, 47906200256512,
+STORE, 47906200256512, 47906200354815,
+STORE, 47906200399872, 47906200408063,
+STORE, 47906200354816, 47906200399871,
+ERASE, 47906200354816, 47906200354816,
+STORE, 47906200354816, 47906200408063,
+ERASE, 47906200354816, 47906200354816,
+STORE, 47906200354816, 47906200399871,
+STORE, 47906200399872, 47906200408063,
+ERASE, 47906200399872, 47906200399872,
+STORE, 47906200399872, 47906200408063,
+STORE, 47906200408064, 47906201006079,
+STORE, 47906200526848, 47906201006079,
+STORE, 47906200408064, 47906200526847,
+ERASE, 47906200526848, 47906200526848,
+STORE, 47906200526848, 47906200952831,
+STORE, 47906200952832, 47906201006079,
+STORE, 47906200842240, 47906200952831,
+STORE, 47906200526848, 47906200842239,
+ERASE, 47906200526848, 47906200526848,
+STORE, 47906200526848, 47906200842239,
+STORE, 47906200948736, 47906200952831,
+STORE, 47906200842240, 47906200948735,
+ERASE, 47906200842240, 47906200842240,
+STORE, 47906200842240, 47906200948735,
+ERASE, 47906200952832, 47906200952832,
+STORE, 47906200952832, 47906201006079,
+STORE, 47906201006080, 47906204057599,
+STORE, 47906201550848, 47906204057599,
+STORE, 47906201006080, 47906201550847,
+ERASE, 47906201550848, 47906201550848,
+STORE, 47906201550848, 47906203836415,
+STORE, 47906203836416, 47906204057599,
+STORE, 47906203246592, 47906203836415,
+STORE, 47906201550848, 47906203246591,
+ERASE, 47906201550848, 47906201550848,
+STORE, 47906201550848, 47906203246591,
+STORE, 47906203832320, 47906203836415,
+STORE, 47906203246592, 47906203832319,
+ERASE, 47906203246592, 47906203246592,
+STORE, 47906203246592, 47906203832319,
+STORE, 47906204041216, 47906204057599,
+STORE, 47906203836416, 47906204041215,
+ERASE, 47906203836416, 47906203836416,
+STORE, 47906203836416, 47906204041215,
+ERASE, 47906204041216, 47906204041216,
+STORE, 47906204041216, 47906204057599,
+STORE, 47906204057600, 47906204090367,
+ERASE, 47906204057600, 47906204057600,
+STORE, 47906204057600, 47906204065791,
+STORE, 47906204065792, 47906204090367,
+STORE, 47906204078080, 47906204090367,
+STORE, 47906204065792, 47906204078079,
+ERASE, 47906204065792, 47906204065792,
+STORE, 47906204065792, 47906204078079,
+STORE, 47906204082176, 47906204090367,
+STORE, 47906204078080, 47906204082175,
+ERASE, 47906204078080, 47906204078080,
+STORE, 47906204078080, 47906204090367,
+ERASE, 47906204078080, 47906204078080,
+STORE, 47906204078080, 47906204082175,
+STORE, 47906204082176, 47906204090367,
+ERASE, 47906204082176, 47906204082176,
+STORE, 47906204082176, 47906204090367,
+STORE, 47906204090368, 47906204127231,
+ERASE, 47906204090368, 47906204090368,
+STORE, 47906204090368, 47906204098559,
+STORE, 47906204098560, 47906204127231,
+STORE, 47906204114944, 47906204127231,
+STORE, 47906204098560, 47906204114943,
+ERASE, 47906204098560, 47906204098560,
+STORE, 47906204098560, 47906204114943,
+STORE, 47906204119040, 47906204127231,
+STORE, 47906204114944, 47906204119039,
+ERASE, 47906204114944, 47906204114944,
+STORE, 47906204114944, 47906204127231,
+ERASE, 47906204114944, 47906204114944,
+STORE, 47906204114944, 47906204119039,
+STORE, 47906204119040, 47906204127231,
+ERASE, 47906204119040, 47906204119040,
+STORE, 47906204119040, 47906204127231,
+STORE, 47906204127232, 47906204602367,
+ERASE, 47906204127232, 47906204127232,
+STORE, 47906204127232, 47906204135423,
+STORE, 47906204135424, 47906204602367,
+STORE, 47906204471296, 47906204602367,
+STORE, 47906204135424, 47906204471295,
+ERASE, 47906204135424, 47906204135424,
+STORE, 47906204135424, 47906204471295,
+STORE, 47906204594176, 47906204602367,
+STORE, 47906204471296, 47906204594175,
+ERASE, 47906204471296, 47906204471296,
+STORE, 47906204471296, 47906204602367,
+ERASE, 47906204471296, 47906204471296,
+STORE, 47906204471296, 47906204594175,
+STORE, 47906204594176, 47906204602367,
+ERASE, 47906204594176, 47906204594176,
+STORE, 47906204594176, 47906204602367,
+STORE, 47906204602368, 47906204610559,
+STORE, 47906204610560, 47906204631039,
+ERASE, 47906204610560, 47906204610560,
+STORE, 47906204610560, 47906204614655,
+STORE, 47906204614656, 47906204631039,
+STORE, 47906204618752, 47906204631039,
+STORE, 47906204614656, 47906204618751,
+ERASE, 47906204614656, 47906204614656,
+STORE, 47906204614656, 47906204618751,
+STORE, 47906204622848, 47906204631039,
+STORE, 47906204618752, 47906204622847,
+ERASE, 47906204618752, 47906204618752,
+STORE, 47906204618752, 47906204631039,
+ERASE, 47906204618752, 47906204618752,
+STORE, 47906204618752, 47906204622847,
+STORE, 47906204622848, 47906204631039,
+ERASE, 47906204622848, 47906204622848,
+STORE, 47906204622848, 47906204631039,
+STORE, 47906204631040, 47906204639231,
+ERASE, 47906197295104, 47906197295104,
+STORE, 47906197295104, 47906197311487,
+STORE, 47906197311488, 47906197319679,
+ERASE, 47906204622848, 47906204622848,
+STORE, 47906204622848, 47906204626943,
+STORE, 47906204626944, 47906204631039,
+ERASE, 47906200211456, 47906200211456,
+STORE, 47906200211456, 47906200215551,
+STORE, 47906200215552, 47906200219647,
+ERASE, 47906204594176, 47906204594176,
+STORE, 47906204594176, 47906204598271,
+STORE, 47906204598272, 47906204602367,
+ERASE, 47906204119040, 47906204119040,
+STORE, 47906204119040, 47906204123135,
+STORE, 47906204123136, 47906204127231,
+ERASE, 47906204082176, 47906204082176,
+STORE, 47906204082176, 47906204086271,
+STORE, 47906204086272, 47906204090367,
+ERASE, 47906203836416, 47906203836416,
+STORE, 47906203836416, 47906204033023,
+STORE, 47906204033024, 47906204041215,
+ERASE, 47906200952832, 47906200952832,
+STORE, 47906200952832, 47906200989695,
+STORE, 47906200989696, 47906201006079,
+ERASE, 47906200399872, 47906200399872,
+STORE, 47906200399872, 47906200403967,
+STORE, 47906200403968, 47906200408063,
+ERASE, 47906200084480, 47906200084480,
+STORE, 47906200084480, 47906200088575,
+STORE, 47906200088576, 47906200092671,
+ERASE, 47906197815296, 47906197815296,
+STORE, 47906197815296, 47906197835775,
+STORE, 47906197835776, 47906197839871,
+ERASE, 47906197483520, 47906197483520,
+STORE, 47906197483520, 47906197487615,
+STORE, 47906197487616, 47906197491711,
+ERASE, 47906197438464, 47906197438464,
+STORE, 47906197438464, 47906197442559,
+STORE, 47906197442560, 47906197446655,
+ERASE, 94641232658432, 94641232658432,
+STORE, 94641232658432, 94641232781311,
+STORE, 94641232781312, 94641232785407,
+ERASE, 139726599675904, 139726599675904,
+STORE, 139726599675904, 139726599679999,
+STORE, 139726599680000, 139726599684095,
+ERASE, 47906195480576, 47906195480576,
+STORE, 94641242615808, 94641242750975,
+ };
+ unsigned long set11[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140732658499584, 140737488351231,
+ERASE, 140732658499584, 140732658499584,
+STORE, 140732658499584, 140732658503679,
+STORE, 94029856579584, 94029856751615,
+ERASE, 94029856579584, 94029856579584,
+STORE, 94029856579584, 94029856595967,
+STORE, 94029856595968, 94029856751615,
+ERASE, 94029856595968, 94029856595968,
+STORE, 94029856595968, 94029856698367,
+STORE, 94029856698368, 94029856739327,
+STORE, 94029856739328, 94029856751615,
+STORE, 140014592573440, 140014592745471,
+ERASE, 140014592573440, 140014592573440,
+STORE, 140014592573440, 140014592577535,
+STORE, 140014592577536, 140014592745471,
+ERASE, 140014592577536, 140014592577536,
+STORE, 140014592577536, 140014592700415,
+STORE, 140014592700416, 140014592733183,
+STORE, 140014592733184, 140014592741375,
+STORE, 140014592741376, 140014592745471,
+STORE, 140732658565120, 140732658569215,
+STORE, 140732658552832, 140732658565119,
+ };
+
+ unsigned long set12[] = { /* contains 12 values. */
+STORE, 140737488347136, 140737488351231,
+STORE, 140732658499584, 140737488351231,
+ERASE, 140732658499584, 140732658499584,
+STORE, 140732658499584, 140732658503679,
+STORE, 94029856579584, 94029856751615,
+ERASE, 94029856579584, 94029856579584,
+STORE, 94029856579584, 94029856595967,
+STORE, 94029856595968, 94029856751615,
+ERASE, 94029856595968, 94029856595968,
+STORE, 94029856595968, 94029856698367,
+STORE, 94029856698368, 94029856739327,
+STORE, 94029856739328, 94029856751615,
+STORE, 140014592573440, 140014592745471,
+ERASE, 140014592573440, 140014592573440,
+STORE, 140014592573440, 140014592577535,
+STORE, 140014592577536, 140014592745471,
+ERASE, 140014592577536, 140014592577536,
+STORE, 140014592577536, 140014592700415,
+STORE, 140014592700416, 140014592733183,
+STORE, 140014592733184, 140014592741375,
+STORE, 140014592741376, 140014592745471,
+STORE, 140732658565120, 140732658569215,
+STORE, 140732658552832, 140732658565119,
+STORE, 140014592741375, 140014592741375, /* contrived */
+STORE, 140014592733184, 140014592741376, /* creates first entry retry. */
+ };
+ unsigned long set13[] = {
+STORE, 140373516247040, 140373516251135,/*: ffffa2e7b0e10d80 */
+STORE, 140373516251136, 140373516255231,/*: ffffa2e7b1195d80 */
+STORE, 140373516255232, 140373516443647,/*: ffffa2e7b0e109c0 */
+STORE, 140373516443648, 140373516587007,/*: ffffa2e7b05fecc0 */
+STORE, 140373516963840, 140373518647295,/*: ffffa2e7bfbdcc00 */
+STORE, 140373518647296, 140373518663679,/*: ffffa2e7bf5d59c0 */
+STORE, 140373518663680, 140373518684159,/*: deleted (257) */
+STORE, 140373518680064, 140373518684159,/*: ffffa2e7b0e1cb40 */
+STORE, 140373518684160, 140373518688254,/*: ffffa2e7b05fec00 */
+STORE, 140373518688256, 140373518692351,/*: ffffa2e7bfbdcd80 */
+STORE, 140373518692352, 140373518696447,/*: ffffa2e7b0749e40 */
+ };
+ unsigned long set14[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140731667996672, 140737488351231,
+SNULL, 140731668000767, 140737488351231,
+STORE, 140731667996672, 140731668000767,
+STORE, 140731667865600, 140731668000767,
+STORE, 94077521272832, 94077521313791,
+SNULL, 94077521301503, 94077521313791,
+STORE, 94077521272832, 94077521301503,
+STORE, 94077521301504, 94077521313791,
+ERASE, 94077521301504, 94077521313791,
+STORE, 94077521305600, 94077521313791,
+STORE, 139826134630400, 139826136883199,
+SNULL, 139826134773759, 139826136883199,
+STORE, 139826134630400, 139826134773759,
+STORE, 139826134773760, 139826136883199,
+ERASE, 139826134773760, 139826136883199,
+STORE, 139826136870912, 139826136879103,
+STORE, 139826136879104, 139826136883199,
+STORE, 140731668013056, 140731668017151,
+STORE, 140731668000768, 140731668013055,
+STORE, 139826136862720, 139826136870911,
+STORE, 139826132406272, 139826134630399,
+SNULL, 139826134056959, 139826134630399,
+STORE, 139826132406272, 139826134056959,
+STORE, 139826134056960, 139826134630399,
+SNULL, 139826134056960, 139826134626303,
+STORE, 139826134626304, 139826134630399,
+STORE, 139826134056960, 139826134626303,
+ERASE, 139826134056960, 139826134626303,
+STORE, 139826134056960, 139826134626303,
+ERASE, 139826134626304, 139826134630399,
+STORE, 139826134626304, 139826134630399,
+STORE, 139826136842240, 139826136862719,
+STORE, 139826130022400, 139826132406271,
+SNULL, 139826130022400, 139826130288639,
+STORE, 139826130288640, 139826132406271,
+STORE, 139826130022400, 139826130288639,
+SNULL, 139826132381695, 139826132406271,
+STORE, 139826130288640, 139826132381695,
+STORE, 139826132381696, 139826132406271,
+SNULL, 139826132381696, 139826132402175,
+STORE, 139826132402176, 139826132406271,
+STORE, 139826132381696, 139826132402175,
+ERASE, 139826132381696, 139826132402175,
+STORE, 139826132381696, 139826132402175,
+ERASE, 139826132402176, 139826132406271,
+STORE, 139826132402176, 139826132406271,
+STORE, 139826127806464, 139826130022399,
+SNULL, 139826127806464, 139826127904767,
+STORE, 139826127904768, 139826130022399,
+STORE, 139826127806464, 139826127904767,
+SNULL, 139826129997823, 139826130022399,
+STORE, 139826127904768, 139826129997823,
+STORE, 139826129997824, 139826130022399,
+SNULL, 139826129997824, 139826130006015,
+STORE, 139826130006016, 139826130022399,
+STORE, 139826129997824, 139826130006015,
+ERASE, 139826129997824, 139826130006015,
+STORE, 139826129997824, 139826130006015,
+ERASE, 139826130006016, 139826130022399,
+STORE, 139826130006016, 139826130022399,
+STORE, 139826124009472, 139826127806463,
+SNULL, 139826124009472, 139826125668351,
+STORE, 139826125668352, 139826127806463,
+STORE, 139826124009472, 139826125668351,
+SNULL, 139826127765503, 139826127806463,
+STORE, 139826125668352, 139826127765503,
+STORE, 139826127765504, 139826127806463,
+SNULL, 139826127765504, 139826127790079,
+STORE, 139826127790080, 139826127806463,
+STORE, 139826127765504, 139826127790079,
+ERASE, 139826127765504, 139826127790079,
+STORE, 139826127765504, 139826127790079,
+ERASE, 139826127790080, 139826127806463,
+STORE, 139826127790080, 139826127806463,
+STORE, 139826121748480, 139826124009471,
+SNULL, 139826121748480, 139826121900031,
+STORE, 139826121900032, 139826124009471,
+STORE, 139826121748480, 139826121900031,
+SNULL, 139826123993087, 139826124009471,
+STORE, 139826121900032, 139826123993087,
+STORE, 139826123993088, 139826124009471,
+SNULL, 139826123993088, 139826124001279,
+STORE, 139826124001280, 139826124009471,
+STORE, 139826123993088, 139826124001279,
+ERASE, 139826123993088, 139826124001279,
+STORE, 139826123993088, 139826124001279,
+ERASE, 139826124001280, 139826124009471,
+STORE, 139826124001280, 139826124009471,
+STORE, 139826119626752, 139826121748479,
+SNULL, 139826119626752, 139826119643135,
+STORE, 139826119643136, 139826121748479,
+STORE, 139826119626752, 139826119643135,
+SNULL, 139826121740287, 139826121748479,
+STORE, 139826119643136, 139826121740287,
+STORE, 139826121740288, 139826121748479,
+ERASE, 139826121740288, 139826121748479,
+STORE, 139826121740288, 139826121748479,
+STORE, 139826136834048, 139826136842239,
+STORE, 139826117496832, 139826119626751,
+SNULL, 139826117496832, 139826117525503,
+STORE, 139826117525504, 139826119626751,
+STORE, 139826117496832, 139826117525503,
+SNULL, 139826119618559, 139826119626751,
+STORE, 139826117525504, 139826119618559,
+STORE, 139826119618560, 139826119626751,
+ERASE, 139826119618560, 139826119626751,
+STORE, 139826119618560, 139826119626751,
+STORE, 139826115244032, 139826117496831,
+SNULL, 139826115244032, 139826115395583,
+STORE, 139826115395584, 139826117496831,
+STORE, 139826115244032, 139826115395583,
+SNULL, 139826117488639, 139826117496831,
+STORE, 139826115395584, 139826117488639,
+STORE, 139826117488640, 139826117496831,
+ERASE, 139826117488640, 139826117496831,
+STORE, 139826117488640, 139826117496831,
+STORE, 139826113073152, 139826115244031,
+SNULL, 139826113073152, 139826113142783,
+STORE, 139826113142784, 139826115244031,
+STORE, 139826113073152, 139826113142783,
+SNULL, 139826115235839, 139826115244031,
+STORE, 139826113142784, 139826115235839,
+STORE, 139826115235840, 139826115244031,
+ERASE, 139826115235840, 139826115244031,
+STORE, 139826115235840, 139826115244031,
+STORE, 139826109861888, 139826113073151,
+SNULL, 139826109861888, 139826110939135,
+STORE, 139826110939136, 139826113073151,
+STORE, 139826109861888, 139826110939135,
+SNULL, 139826113036287, 139826113073151,
+STORE, 139826110939136, 139826113036287,
+STORE, 139826113036288, 139826113073151,
+ERASE, 139826113036288, 139826113073151,
+STORE, 139826113036288, 139826113073151,
+STORE, 139826107727872, 139826109861887,
+SNULL, 139826107727872, 139826107756543,
+STORE, 139826107756544, 139826109861887,
+STORE, 139826107727872, 139826107756543,
+SNULL, 139826109853695, 139826109861887,
+STORE, 139826107756544, 139826109853695,
+STORE, 139826109853696, 139826109861887,
+ERASE, 139826109853696, 139826109861887,
+STORE, 139826109853696, 139826109861887,
+STORE, 139826105417728, 139826107727871,
+SNULL, 139826105417728, 139826105622527,
+STORE, 139826105622528, 139826107727871,
+STORE, 139826105417728, 139826105622527,
+SNULL, 139826107719679, 139826107727871,
+STORE, 139826105622528, 139826107719679,
+STORE, 139826107719680, 139826107727871,
+ERASE, 139826107719680, 139826107727871,
+STORE, 139826107719680, 139826107727871,
+STORE, 139826136825856, 139826136842239,
+STORE, 139826103033856, 139826105417727,
+SNULL, 139826103033856, 139826103226367,
+STORE, 139826103226368, 139826105417727,
+STORE, 139826103033856, 139826103226367,
+SNULL, 139826105319423, 139826105417727,
+STORE, 139826103226368, 139826105319423,
+STORE, 139826105319424, 139826105417727,
+ERASE, 139826105319424, 139826105417727,
+STORE, 139826105319424, 139826105417727,
+STORE, 139826100916224, 139826103033855,
+SNULL, 139826100916224, 139826100932607,
+STORE, 139826100932608, 139826103033855,
+STORE, 139826100916224, 139826100932607,
+SNULL, 139826103025663, 139826103033855,
+STORE, 139826100932608, 139826103025663,
+STORE, 139826103025664, 139826103033855,
+ERASE, 139826103025664, 139826103033855,
+STORE, 139826103025664, 139826103033855,
+STORE, 139826098348032, 139826100916223,
+SNULL, 139826098348032, 139826098814975,
+STORE, 139826098814976, 139826100916223,
+STORE, 139826098348032, 139826098814975,
+SNULL, 139826100908031, 139826100916223,
+STORE, 139826098814976, 139826100908031,
+STORE, 139826100908032, 139826100916223,
+ERASE, 139826100908032, 139826100916223,
+STORE, 139826100908032, 139826100916223,
+STORE, 139826096234496, 139826098348031,
+SNULL, 139826096234496, 139826096246783,
+STORE, 139826096246784, 139826098348031,
+STORE, 139826096234496, 139826096246783,
+SNULL, 139826098339839, 139826098348031,
+STORE, 139826096246784, 139826098339839,
+STORE, 139826098339840, 139826098348031,
+ERASE, 139826098339840, 139826098348031,
+STORE, 139826098339840, 139826098348031,
+STORE, 139826094055424, 139826096234495,
+SNULL, 139826094055424, 139826094133247,
+STORE, 139826094133248, 139826096234495,
+STORE, 139826094055424, 139826094133247,
+SNULL, 139826096226303, 139826096234495,
+STORE, 139826094133248, 139826096226303,
+STORE, 139826096226304, 139826096234495,
+ERASE, 139826096226304, 139826096234495,
+STORE, 139826096226304, 139826096234495,
+STORE, 139826136817664, 139826136842239,
+STORE, 139826091937792, 139826094055423,
+SNULL, 139826091937792, 139826091954175,
+STORE, 139826091954176, 139826094055423,
+STORE, 139826091937792, 139826091954175,
+SNULL, 139826094047231, 139826094055423,
+STORE, 139826091954176, 139826094047231,
+STORE, 139826094047232, 139826094055423,
+ERASE, 139826094047232, 139826094055423,
+STORE, 139826094047232, 139826094055423,
+STORE, 139826136809472, 139826136842239,
+SNULL, 139826127781887, 139826127790079,
+STORE, 139826127765504, 139826127781887,
+STORE, 139826127781888, 139826127790079,
+SNULL, 139826094051327, 139826094055423,
+STORE, 139826094047232, 139826094051327,
+STORE, 139826094051328, 139826094055423,
+SNULL, 139826096230399, 139826096234495,
+STORE, 139826096226304, 139826096230399,
+STORE, 139826096230400, 139826096234495,
+SNULL, 139826098343935, 139826098348031,
+STORE, 139826098339840, 139826098343935,
+STORE, 139826098343936, 139826098348031,
+SNULL, 139826130001919, 139826130006015,
+STORE, 139826129997824, 139826130001919,
+STORE, 139826130001920, 139826130006015,
+SNULL, 139826100912127, 139826100916223,
+STORE, 139826100908032, 139826100912127,
+STORE, 139826100912128, 139826100916223,
+SNULL, 139826103029759, 139826103033855,
+STORE, 139826103025664, 139826103029759,
+STORE, 139826103029760, 139826103033855,
+SNULL, 139826105413631, 139826105417727,
+STORE, 139826105319424, 139826105413631,
+STORE, 139826105413632, 139826105417727,
+SNULL, 139826107723775, 139826107727871,
+STORE, 139826107719680, 139826107723775,
+STORE, 139826107723776, 139826107727871,
+SNULL, 139826109857791, 139826109861887,
+STORE, 139826109853696, 139826109857791,
+STORE, 139826109857792, 139826109861887,
+SNULL, 139826113044479, 139826113073151,
+STORE, 139826113036288, 139826113044479,
+STORE, 139826113044480, 139826113073151,
+SNULL, 139826115239935, 139826115244031,
+STORE, 139826115235840, 139826115239935,
+STORE, 139826115239936, 139826115244031,
+SNULL, 139826117492735, 139826117496831,
+STORE, 139826117488640, 139826117492735,
+STORE, 139826117492736, 139826117496831,
+SNULL, 139826119622655, 139826119626751,
+STORE, 139826119618560, 139826119622655,
+STORE, 139826119622656, 139826119626751,
+SNULL, 139826121744383, 139826121748479,
+STORE, 139826121740288, 139826121744383,
+STORE, 139826121744384, 139826121748479,
+SNULL, 139826123997183, 139826124001279,
+STORE, 139826123993088, 139826123997183,
+STORE, 139826123997184, 139826124001279,
+SNULL, 139826132398079, 139826132402175,
+STORE, 139826132381696, 139826132398079,
+STORE, 139826132398080, 139826132402175,
+SNULL, 139826134622207, 139826134626303,
+STORE, 139826134056960, 139826134622207,
+STORE, 139826134622208, 139826134626303,
+SNULL, 94077521309695, 94077521313791,
+STORE, 94077521305600, 94077521309695,
+STORE, 94077521309696, 94077521313791,
+SNULL, 139826136875007, 139826136879103,
+STORE, 139826136870912, 139826136875007,
+STORE, 139826136875008, 139826136879103,
+ERASE, 139826136842240, 139826136862719,
+STORE, 94077554049024, 94077554184191,
+STORE, 139826136543232, 139826136842239,
+STORE, 139826136276992, 139826136842239,
+STORE, 139826136010752, 139826136842239,
+STORE, 139826135744512, 139826136842239,
+SNULL, 139826136543231, 139826136842239,
+STORE, 139826135744512, 139826136543231,
+STORE, 139826136543232, 139826136842239,
+SNULL, 139826136543232, 139826136809471,
+STORE, 139826136809472, 139826136842239,
+STORE, 139826136543232, 139826136809471,
+ };
+ unsigned long set15[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140722061451264, 140737488351231,
+SNULL, 140722061455359, 140737488351231,
+STORE, 140722061451264, 140722061455359,
+STORE, 140722061320192, 140722061455359,
+STORE, 94728600248320, 94728600289279,
+SNULL, 94728600276991, 94728600289279,
+STORE, 94728600248320, 94728600276991,
+STORE, 94728600276992, 94728600289279,
+ERASE, 94728600276992, 94728600289279,
+STORE, 94728600281088, 94728600289279,
+STORE, 139906806779904, 139906809032703,
+SNULL, 139906806923263, 139906809032703,
+STORE, 139906806779904, 139906806923263,
+STORE, 139906806923264, 139906809032703,
+ERASE, 139906806923264, 139906809032703,
+STORE, 139906809020416, 139906809028607,
+STORE, 139906809028608, 139906809032703,
+STORE, 140722061692928, 140722061697023,
+STORE, 140722061680640, 140722061692927,
+STORE, 139906809012224, 139906809020415,
+STORE, 139906804555776, 139906806779903,
+SNULL, 139906806206463, 139906806779903,
+STORE, 139906804555776, 139906806206463,
+STORE, 139906806206464, 139906806779903,
+SNULL, 139906806206464, 139906806775807,
+STORE, 139906806775808, 139906806779903,
+STORE, 139906806206464, 139906806775807,
+ERASE, 139906806206464, 139906806775807,
+STORE, 139906806206464, 139906806775807,
+ERASE, 139906806775808, 139906806779903,
+STORE, 139906806775808, 139906806779903,
+STORE, 139906808991744, 139906809012223,
+STORE, 139906802171904, 139906804555775,
+SNULL, 139906802171904, 139906802438143,
+STORE, 139906802438144, 139906804555775,
+STORE, 139906802171904, 139906802438143,
+SNULL, 139906804531199, 139906804555775,
+STORE, 139906802438144, 139906804531199,
+STORE, 139906804531200, 139906804555775,
+SNULL, 139906804531200, 139906804551679,
+STORE, 139906804551680, 139906804555775,
+STORE, 139906804531200, 139906804551679,
+ERASE, 139906804531200, 139906804551679,
+STORE, 139906804531200, 139906804551679,
+ERASE, 139906804551680, 139906804555775,
+STORE, 139906804551680, 139906804555775,
+STORE, 139906799955968, 139906802171903,
+SNULL, 139906799955968, 139906800054271,
+STORE, 139906800054272, 139906802171903,
+STORE, 139906799955968, 139906800054271,
+SNULL, 139906802147327, 139906802171903,
+STORE, 139906800054272, 139906802147327,
+STORE, 139906802147328, 139906802171903,
+SNULL, 139906802147328, 139906802155519,
+STORE, 139906802155520, 139906802171903,
+STORE, 139906802147328, 139906802155519,
+ERASE, 139906802147328, 139906802155519,
+STORE, 139906802147328, 139906802155519,
+ERASE, 139906802155520, 139906802171903,
+STORE, 139906802155520, 139906802171903,
+STORE, 139906796158976, 139906799955967,
+SNULL, 139906796158976, 139906797817855,
+STORE, 139906797817856, 139906799955967,
+STORE, 139906796158976, 139906797817855,
+SNULL, 139906799915007, 139906799955967,
+STORE, 139906797817856, 139906799915007,
+STORE, 139906799915008, 139906799955967,
+SNULL, 139906799915008, 139906799939583,
+STORE, 139906799939584, 139906799955967,
+STORE, 139906799915008, 139906799939583,
+ERASE, 139906799915008, 139906799939583,
+STORE, 139906799915008, 139906799939583,
+ERASE, 139906799939584, 139906799955967,
+STORE, 139906799939584, 139906799955967,
+STORE, 139906793897984, 139906796158975,
+SNULL, 139906793897984, 139906794049535,
+STORE, 139906794049536, 139906796158975,
+STORE, 139906793897984, 139906794049535,
+SNULL, 139906796142591, 139906796158975,
+STORE, 139906794049536, 139906796142591,
+STORE, 139906796142592, 139906796158975,
+SNULL, 139906796142592, 139906796150783,
+STORE, 139906796150784, 139906796158975,
+STORE, 139906796142592, 139906796150783,
+ERASE, 139906796142592, 139906796150783,
+STORE, 139906796142592, 139906796150783,
+ERASE, 139906796150784, 139906796158975,
+STORE, 139906796150784, 139906796158975,
+STORE, 139906791776256, 139906793897983,
+SNULL, 139906791776256, 139906791792639,
+STORE, 139906791792640, 139906793897983,
+STORE, 139906791776256, 139906791792639,
+SNULL, 139906793889791, 139906793897983,
+STORE, 139906791792640, 139906793889791,
+STORE, 139906793889792, 139906793897983,
+ERASE, 139906793889792, 139906793897983,
+STORE, 139906793889792, 139906793897983,
+STORE, 139906808983552, 139906808991743,
+STORE, 139906789646336, 139906791776255,
+SNULL, 139906789646336, 139906789675007,
+STORE, 139906789675008, 139906791776255,
+STORE, 139906789646336, 139906789675007,
+SNULL, 139906791768063, 139906791776255,
+STORE, 139906789675008, 139906791768063,
+STORE, 139906791768064, 139906791776255,
+ERASE, 139906791768064, 139906791776255,
+STORE, 139906791768064, 139906791776255,
+STORE, 139906787393536, 139906789646335,
+SNULL, 139906787393536, 139906787545087,
+STORE, 139906787545088, 139906789646335,
+STORE, 139906787393536, 139906787545087,
+SNULL, 139906789638143, 139906789646335,
+STORE, 139906787545088, 139906789638143,
+STORE, 139906789638144, 139906789646335,
+ERASE, 139906789638144, 139906789646335,
+STORE, 139906789638144, 139906789646335,
+STORE, 139906785222656, 139906787393535,
+SNULL, 139906785222656, 139906785292287,
+STORE, 139906785292288, 139906787393535,
+STORE, 139906785222656, 139906785292287,
+SNULL, 139906787385343, 139906787393535,
+STORE, 139906785292288, 139906787385343,
+STORE, 139906787385344, 139906787393535,
+ERASE, 139906787385344, 139906787393535,
+STORE, 139906787385344, 139906787393535,
+STORE, 139906782011392, 139906785222655,
+SNULL, 139906782011392, 139906783088639,
+STORE, 139906783088640, 139906785222655,
+STORE, 139906782011392, 139906783088639,
+SNULL, 139906785185791, 139906785222655,
+STORE, 139906783088640, 139906785185791,
+STORE, 139906785185792, 139906785222655,
+ERASE, 139906785185792, 139906785222655,
+STORE, 139906785185792, 139906785222655,
+STORE, 139906779877376, 139906782011391,
+SNULL, 139906779877376, 139906779906047,
+STORE, 139906779906048, 139906782011391,
+STORE, 139906779877376, 139906779906047,
+SNULL, 139906782003199, 139906782011391,
+STORE, 139906779906048, 139906782003199,
+STORE, 139906782003200, 139906782011391,
+ERASE, 139906782003200, 139906782011391,
+STORE, 139906782003200, 139906782011391,
+STORE, 139906777567232, 139906779877375,
+SNULL, 139906777567232, 139906777772031,
+STORE, 139906777772032, 139906779877375,
+STORE, 139906777567232, 139906777772031,
+SNULL, 139906779869183, 139906779877375,
+STORE, 139906777772032, 139906779869183,
+STORE, 139906779869184, 139906779877375,
+ERASE, 139906779869184, 139906779877375,
+STORE, 139906779869184, 139906779877375,
+STORE, 139906808975360, 139906808991743,
+STORE, 139906775183360, 139906777567231,
+SNULL, 139906775183360, 139906775375871,
+STORE, 139906775375872, 139906777567231,
+STORE, 139906775183360, 139906775375871,
+SNULL, 139906777468927, 139906777567231,
+STORE, 139906775375872, 139906777468927,
+STORE, 139906777468928, 139906777567231,
+ERASE, 139906777468928, 139906777567231,
+STORE, 139906777468928, 139906777567231,
+STORE, 139906773065728, 139906775183359,
+SNULL, 139906773065728, 139906773082111,
+STORE, 139906773082112, 139906775183359,
+STORE, 139906773065728, 139906773082111,
+SNULL, 139906775175167, 139906775183359,
+STORE, 139906773082112, 139906775175167,
+STORE, 139906775175168, 139906775183359,
+ERASE, 139906775175168, 139906775183359,
+STORE, 139906775175168, 139906775183359,
+STORE, 139906770497536, 139906773065727,
+SNULL, 139906770497536, 139906770964479,
+STORE, 139906770964480, 139906773065727,
+STORE, 139906770497536, 139906770964479,
+SNULL, 139906773057535, 139906773065727,
+STORE, 139906770964480, 139906773057535,
+STORE, 139906773057536, 139906773065727,
+ERASE, 139906773057536, 139906773065727,
+STORE, 139906773057536, 139906773065727,
+STORE, 139906768384000, 139906770497535,
+SNULL, 139906768384000, 139906768396287,
+STORE, 139906768396288, 139906770497535,
+STORE, 139906768384000, 139906768396287,
+SNULL, 139906770489343, 139906770497535,
+STORE, 139906768396288, 139906770489343,
+STORE, 139906770489344, 139906770497535,
+ERASE, 139906770489344, 139906770497535,
+STORE, 139906770489344, 139906770497535,
+STORE, 139906766204928, 139906768383999,
+SNULL, 139906766204928, 139906766282751,
+STORE, 139906766282752, 139906768383999,
+STORE, 139906766204928, 139906766282751,
+SNULL, 139906768375807, 139906768383999,
+STORE, 139906766282752, 139906768375807,
+STORE, 139906768375808, 139906768383999,
+ERASE, 139906768375808, 139906768383999,
+STORE, 139906768375808, 139906768383999,
+STORE, 139906808967168, 139906808991743,
+STORE, 139906764087296, 139906766204927,
+SNULL, 139906764087296, 139906764103679,
+STORE, 139906764103680, 139906766204927,
+STORE, 139906764087296, 139906764103679,
+SNULL, 139906766196735, 139906766204927,
+STORE, 139906764103680, 139906766196735,
+STORE, 139906766196736, 139906766204927,
+ERASE, 139906766196736, 139906766204927,
+STORE, 139906766196736, 139906766204927,
+STORE, 139906808958976, 139906808991743,
+SNULL, 139906799931391, 139906799939583,
+STORE, 139906799915008, 139906799931391,
+STORE, 139906799931392, 139906799939583,
+SNULL, 139906766200831, 139906766204927,
+STORE, 139906766196736, 139906766200831,
+STORE, 139906766200832, 139906766204927,
+SNULL, 139906768379903, 139906768383999,
+STORE, 139906768375808, 139906768379903,
+STORE, 139906768379904, 139906768383999,
+SNULL, 139906770493439, 139906770497535,
+STORE, 139906770489344, 139906770493439,
+STORE, 139906770493440, 139906770497535,
+SNULL, 139906802151423, 139906802155519,
+STORE, 139906802147328, 139906802151423,
+STORE, 139906802151424, 139906802155519,
+SNULL, 139906773061631, 139906773065727,
+STORE, 139906773057536, 139906773061631,
+STORE, 139906773061632, 139906773065727,
+SNULL, 139906775179263, 139906775183359,
+STORE, 139906775175168, 139906775179263,
+STORE, 139906775179264, 139906775183359,
+SNULL, 139906777563135, 139906777567231,
+STORE, 139906777468928, 139906777563135,
+STORE, 139906777563136, 139906777567231,
+SNULL, 139906779873279, 139906779877375,
+STORE, 139906779869184, 139906779873279,
+STORE, 139906779873280, 139906779877375,
+SNULL, 139906782007295, 139906782011391,
+STORE, 139906782003200, 139906782007295,
+STORE, 139906782007296, 139906782011391,
+SNULL, 139906785193983, 139906785222655,
+STORE, 139906785185792, 139906785193983,
+STORE, 139906785193984, 139906785222655,
+SNULL, 139906787389439, 139906787393535,
+STORE, 139906787385344, 139906787389439,
+STORE, 139906787389440, 139906787393535,
+SNULL, 139906789642239, 139906789646335,
+STORE, 139906789638144, 139906789642239,
+STORE, 139906789642240, 139906789646335,
+SNULL, 139906791772159, 139906791776255,
+STORE, 139906791768064, 139906791772159,
+STORE, 139906791772160, 139906791776255,
+SNULL, 139906793893887, 139906793897983,
+STORE, 139906793889792, 139906793893887,
+STORE, 139906793893888, 139906793897983,
+SNULL, 139906796146687, 139906796150783,
+STORE, 139906796142592, 139906796146687,
+STORE, 139906796146688, 139906796150783,
+SNULL, 139906804547583, 139906804551679,
+STORE, 139906804531200, 139906804547583,
+STORE, 139906804547584, 139906804551679,
+SNULL, 139906806771711, 139906806775807,
+STORE, 139906806206464, 139906806771711,
+STORE, 139906806771712, 139906806775807,
+SNULL, 94728600285183, 94728600289279,
+STORE, 94728600281088, 94728600285183,
+STORE, 94728600285184, 94728600289279,
+SNULL, 139906809024511, 139906809028607,
+STORE, 139906809020416, 139906809024511,
+STORE, 139906809024512, 139906809028607,
+ERASE, 139906808991744, 139906809012223,
+STORE, 94728620138496, 94728620273663,
+STORE, 139906808692736, 139906808991743,
+STORE, 139906808426496, 139906808991743,
+STORE, 139906808160256, 139906808991743,
+STORE, 139906807894016, 139906808991743,
+SNULL, 139906808692735, 139906808991743,
+STORE, 139906807894016, 139906808692735,
+STORE, 139906808692736, 139906808991743,
+SNULL, 139906808692736, 139906808958975,
+STORE, 139906808958976, 139906808991743,
+STORE, 139906808692736, 139906808958975,
+ };
+
+ unsigned long set16[] = {
+STORE, 94174808662016, 94174809321471,
+STORE, 94174811414528, 94174811426815,
+STORE, 94174811426816, 94174811430911,
+STORE, 94174811430912, 94174811443199,
+STORE, 94174841700352, 94174841835519,
+STORE, 140173257838592, 140173259497471,
+STORE, 140173259497472, 140173261594623,
+STORE, 140173261594624, 140173261611007,
+STORE, 140173261611008, 140173261619199,
+STORE, 140173261619200, 140173261635583,
+STORE, 140173261635584, 140173261778943,
+STORE, 140173263863808, 140173263871999,
+STORE, 140173263876096, 140173263880191,
+STORE, 140173263880192, 140173263884287,
+STORE, 140173263884288, 140173263888383,
+STORE, 140729801007104, 140729801142271,
+STORE, 140729801617408, 140729801629695,
+STORE, 140729801629696, 140729801633791,
+STORE, 140737488347136, 140737488351231,
+STORE, 140728166858752, 140737488351231,
+SNULL, 140728166862847, 140737488351231,
+STORE, 140728166858752, 140728166862847,
+STORE, 140728166727680, 140728166862847,
+STORE, 93912949866496, 93912950337535,
+SNULL, 93912950288383, 93912950337535,
+STORE, 93912949866496, 93912950288383,
+STORE, 93912950288384, 93912950337535,
+ERASE, 93912950288384, 93912950337535,
+STORE, 93912950292480, 93912950337535,
+STORE, 139921863385088, 139921865637887,
+SNULL, 139921863528447, 139921865637887,
+STORE, 139921863385088, 139921863528447,
+STORE, 139921863528448, 139921865637887,
+ERASE, 139921863528448, 139921865637887,
+STORE, 139921865625600, 139921865633791,
+STORE, 139921865633792, 139921865637887,
+STORE, 140728167899136, 140728167903231,
+STORE, 140728167886848, 140728167899135,
+STORE, 139921865601024, 139921865625599,
+STORE, 139921865592832, 139921865601023,
+STORE, 139921861251072, 139921863385087,
+SNULL, 139921861251072, 139921861279743,
+STORE, 139921861279744, 139921863385087,
+STORE, 139921861251072, 139921861279743,
+SNULL, 139921863376895, 139921863385087,
+STORE, 139921861279744, 139921863376895,
+STORE, 139921863376896, 139921863385087,
+ERASE, 139921863376896, 139921863385087,
+STORE, 139921863376896, 139921863385087,
+STORE, 139921858867200, 139921861251071,
+SNULL, 139921858867200, 139921859133439,
+STORE, 139921859133440, 139921861251071,
+STORE, 139921858867200, 139921859133439,
+SNULL, 139921861226495, 139921861251071,
+STORE, 139921859133440, 139921861226495,
+STORE, 139921861226496, 139921861251071,
+SNULL, 139921861226496, 139921861246975,
+STORE, 139921861246976, 139921861251071,
+STORE, 139921861226496, 139921861246975,
+ERASE, 139921861226496, 139921861246975,
+STORE, 139921861226496, 139921861246975,
+ERASE, 139921861246976, 139921861251071,
+STORE, 139921861246976, 139921861251071,
+STORE, 139921856675840, 139921858867199,
+SNULL, 139921856675840, 139921856765951,
+STORE, 139921856765952, 139921858867199,
+STORE, 139921856675840, 139921856765951,
+SNULL, 139921858859007, 139921858867199,
+STORE, 139921856765952, 139921858859007,
+STORE, 139921858859008, 139921858867199,
+ERASE, 139921858859008, 139921858867199,
+STORE, 139921858859008, 139921858867199,
+STORE, 139921854414848, 139921856675839,
+SNULL, 139921854414848, 139921854566399,
+STORE, 139921854566400, 139921856675839,
+STORE, 139921854414848, 139921854566399,
+SNULL, 139921856659455, 139921856675839,
+STORE, 139921854566400, 139921856659455,
+STORE, 139921856659456, 139921856675839,
+SNULL, 139921856659456, 139921856667647,
+STORE, 139921856667648, 139921856675839,
+STORE, 139921856659456, 139921856667647,
+ERASE, 139921856659456, 139921856667647,
+STORE, 139921856659456, 139921856667647,
+ERASE, 139921856667648, 139921856675839,
+STORE, 139921856667648, 139921856675839,
+STORE, 139921852284928, 139921854414847,
+SNULL, 139921852284928, 139921852313599,
+STORE, 139921852313600, 139921854414847,
+STORE, 139921852284928, 139921852313599,
+SNULL, 139921854406655, 139921854414847,
+STORE, 139921852313600, 139921854406655,
+STORE, 139921854406656, 139921854414847,
+ERASE, 139921854406656, 139921854414847,
+STORE, 139921854406656, 139921854414847,
+STORE, 139921850068992, 139921852284927,
+SNULL, 139921850068992, 139921850167295,
+STORE, 139921850167296, 139921852284927,
+STORE, 139921850068992, 139921850167295,
+SNULL, 139921852260351, 139921852284927,
+STORE, 139921850167296, 139921852260351,
+STORE, 139921852260352, 139921852284927,
+SNULL, 139921852260352, 139921852268543,
+STORE, 139921852268544, 139921852284927,
+STORE, 139921852260352, 139921852268543,
+ERASE, 139921852260352, 139921852268543,
+STORE, 139921852260352, 139921852268543,
+ERASE, 139921852268544, 139921852284927,
+STORE, 139921852268544, 139921852284927,
+STORE, 139921865584640, 139921865601023,
+STORE, 139921846272000, 139921850068991,
+SNULL, 139921846272000, 139921847930879,
+STORE, 139921847930880, 139921850068991,
+STORE, 139921846272000, 139921847930879,
+SNULL, 139921850028031, 139921850068991,
+STORE, 139921847930880, 139921850028031,
+STORE, 139921850028032, 139921850068991,
+SNULL, 139921850028032, 139921850052607,
+STORE, 139921850052608, 139921850068991,
+STORE, 139921850028032, 139921850052607,
+ERASE, 139921850028032, 139921850052607,
+STORE, 139921850028032, 139921850052607,
+ERASE, 139921850052608, 139921850068991,
+STORE, 139921850052608, 139921850068991,
+STORE, 139921844154368, 139921846271999,
+SNULL, 139921844154368, 139921844170751,
+STORE, 139921844170752, 139921846271999,
+STORE, 139921844154368, 139921844170751,
+SNULL, 139921846263807, 139921846271999,
+STORE, 139921844170752, 139921846263807,
+STORE, 139921846263808, 139921846271999,
+ERASE, 139921846263808, 139921846271999,
+STORE, 139921846263808, 139921846271999,
+STORE, 139921842036736, 139921844154367,
+SNULL, 139921842036736, 139921842053119,
+STORE, 139921842053120, 139921844154367,
+STORE, 139921842036736, 139921842053119,
+SNULL, 139921844146175, 139921844154367,
+STORE, 139921842053120, 139921844146175,
+STORE, 139921844146176, 139921844154367,
+ERASE, 139921844146176, 139921844154367,
+STORE, 139921844146176, 139921844154367,
+STORE, 139921839468544, 139921842036735,
+SNULL, 139921839468544, 139921839935487,
+STORE, 139921839935488, 139921842036735,
+STORE, 139921839468544, 139921839935487,
+SNULL, 139921842028543, 139921842036735,
+STORE, 139921839935488, 139921842028543,
+STORE, 139921842028544, 139921842036735,
+ERASE, 139921842028544, 139921842036735,
+STORE, 139921842028544, 139921842036735,
+STORE, 139921837355008, 139921839468543,
+SNULL, 139921837355008, 139921837367295,
+STORE, 139921837367296, 139921839468543,
+STORE, 139921837355008, 139921837367295,
+SNULL, 139921839460351, 139921839468543,
+STORE, 139921837367296, 139921839460351,
+STORE, 139921839460352, 139921839468543,
+ERASE, 139921839460352, 139921839468543,
+STORE, 139921839460352, 139921839468543,
+STORE, 139921865576448, 139921865601023,
+STORE, 139921865564160, 139921865601023,
+SNULL, 139921850044415, 139921850052607,
+STORE, 139921850028032, 139921850044415,
+STORE, 139921850044416, 139921850052607,
+SNULL, 139921839464447, 139921839468543,
+STORE, 139921839460352, 139921839464447,
+STORE, 139921839464448, 139921839468543,
+SNULL, 139921852264447, 139921852268543,
+STORE, 139921852260352, 139921852264447,
+STORE, 139921852264448, 139921852268543,
+SNULL, 139921842032639, 139921842036735,
+STORE, 139921842028544, 139921842032639,
+STORE, 139921842032640, 139921842036735,
+SNULL, 139921844150271, 139921844154367,
+STORE, 139921844146176, 139921844150271,
+STORE, 139921844150272, 139921844154367,
+SNULL, 139921846267903, 139921846271999,
+STORE, 139921846263808, 139921846267903,
+STORE, 139921846267904, 139921846271999,
+SNULL, 139921854410751, 139921854414847,
+STORE, 139921854406656, 139921854410751,
+STORE, 139921854410752, 139921854414847,
+SNULL, 139921856663551, 139921856667647,
+STORE, 139921856659456, 139921856663551,
+STORE, 139921856663552, 139921856667647,
+SNULL, 139921858863103, 139921858867199,
+STORE, 139921858859008, 139921858863103,
+STORE, 139921858863104, 139921858867199,
+SNULL, 139921861242879, 139921861246975,
+STORE, 139921861226496, 139921861242879,
+STORE, 139921861242880, 139921861246975,
+SNULL, 139921863380991, 139921863385087,
+STORE, 139921863376896, 139921863380991,
+STORE, 139921863380992, 139921863385087,
+SNULL, 93912950333439, 93912950337535,
+STORE, 93912950292480, 93912950333439,
+STORE, 93912950333440, 93912950337535,
+SNULL, 139921865629695, 139921865633791,
+STORE, 139921865625600, 139921865629695,
+STORE, 139921865629696, 139921865633791,
+ERASE, 139921865601024, 139921865625599,
+STORE, 93912968110080, 93912968245247,
+STORE, 139921828913152, 139921837355007,
+STORE, 139921865621504, 139921865625599,
+STORE, 139921865617408, 139921865621503,
+STORE, 139921865613312, 139921865617407,
+STORE, 139921865547776, 139921865564159,
+ };
+
+ unsigned long set17[] = {
+STORE, 94397057224704, 94397057646591,
+STORE, 94397057650688, 94397057691647,
+STORE, 94397057691648, 94397057695743,
+STORE, 94397075271680, 94397075406847,
+STORE, 139953169051648, 139953169063935,
+STORE, 139953169063936, 139953171156991,
+STORE, 139953171156992, 139953171161087,
+STORE, 139953171161088, 139953171165183,
+STORE, 139953171165184, 139953171632127,
+STORE, 139953171632128, 139953173725183,
+STORE, 139953173725184, 139953173729279,
+STORE, 139953173729280, 139953173733375,
+STORE, 139953173733376, 139953173749759,
+STORE, 139953173749760, 139953175842815,
+STORE, 139953175842816, 139953175846911,
+STORE, 139953175846912, 139953175851007,
+STORE, 139953175851008, 139953175867391,
+STORE, 139953175867392, 139953177960447,
+STORE, 139953177960448, 139953177964543,
+STORE, 139953177964544, 139953177968639,
+STORE, 139953177968640, 139953179627519,
+STORE, 139953179627520, 139953181724671,
+STORE, 139953181724672, 139953181741055,
+STORE, 139953181741056, 139953181749247,
+STORE, 139953181749248, 139953181765631,
+STORE, 139953181765632, 139953181863935,
+STORE, 139953181863936, 139953183956991,
+STORE, 139953183956992, 139953183961087,
+STORE, 139953183961088, 139953183965183,
+STORE, 139953183965184, 139953183981567,
+STORE, 139953183981568, 139953184010239,
+STORE, 139953184010240, 139953186103295,
+STORE, 139953186103296, 139953186107391,
+STORE, 139953186107392, 139953186111487,
+STORE, 139953186111488, 139953186263039,
+STORE, 139953186263040, 139953188356095,
+STORE, 139953188356096, 139953188360191,
+STORE, 139953188360192, 139953188364287,
+STORE, 139953188364288, 139953188372479,
+STORE, 139953188372480, 139953188462591,
+STORE, 139953188462592, 139953190555647,
+STORE, 139953190555648, 139953190559743,
+STORE, 139953190559744, 139953190563839,
+STORE, 139953190563840, 139953190830079,
+STORE, 139953190830080, 139953192923135,
+STORE, 139953192923136, 139953192939519,
+STORE, 139953192939520, 139953192943615,
+STORE, 139953192943616, 139953192947711,
+STORE, 139953192947712, 139953192976383,
+STORE, 139953192976384, 139953195073535,
+STORE, 139953195073536, 139953195077631,
+STORE, 139953195077632, 139953195081727,
+STORE, 139953195081728, 139953195225087,
+STORE, 139953197281280, 139953197318143,
+STORE, 139953197322240, 139953197326335,
+STORE, 139953197326336, 139953197330431,
+STORE, 139953197330432, 139953197334527,
+STORE, 140720477511680, 140720477646847,
+STORE, 140720478302208, 140720478314495,
+STORE, 140720478314496, 140720478318591,
+ };
+ unsigned long set18[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140724953673728, 140737488351231,
+SNULL, 140724953677823, 140737488351231,
+STORE, 140724953673728, 140724953677823,
+STORE, 140724953542656, 140724953677823,
+STORE, 94675199266816, 94675199311871,
+SNULL, 94675199303679, 94675199311871,
+STORE, 94675199266816, 94675199303679,
+STORE, 94675199303680, 94675199311871,
+ERASE, 94675199303680, 94675199311871,
+STORE, 94675199303680, 94675199311871,
+STORE, 140222970605568, 140222972858367,
+SNULL, 140222970748927, 140222972858367,
+STORE, 140222970605568, 140222970748927,
+STORE, 140222970748928, 140222972858367,
+ERASE, 140222970748928, 140222972858367,
+STORE, 140222972846080, 140222972854271,
+STORE, 140222972854272, 140222972858367,
+STORE, 140724954365952, 140724954370047,
+STORE, 140724954353664, 140724954365951,
+STORE, 140222972841984, 140222972846079,
+STORE, 140222972833792, 140222972841983,
+STORE, 140222968475648, 140222970605567,
+SNULL, 140222968475648, 140222968504319,
+STORE, 140222968504320, 140222970605567,
+STORE, 140222968475648, 140222968504319,
+SNULL, 140222970597375, 140222970605567,
+STORE, 140222968504320, 140222970597375,
+STORE, 140222970597376, 140222970605567,
+ERASE, 140222970597376, 140222970605567,
+STORE, 140222970597376, 140222970605567,
+ };
+ unsigned long set19[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140725182459904, 140737488351231,
+SNULL, 140725182463999, 140737488351231,
+STORE, 140725182459904, 140725182463999,
+STORE, 140725182328832, 140725182463999,
+STORE, 94730166636544, 94730166763519,
+SNULL, 94730166747135, 94730166763519,
+STORE, 94730166636544, 94730166747135,
+STORE, 94730166747136, 94730166763519,
+ERASE, 94730166747136, 94730166763519,
+STORE, 94730166751232, 94730166763519,
+STORE, 140656834555904, 140656836808703,
+SNULL, 140656834699263, 140656836808703,
+STORE, 140656834555904, 140656834699263,
+STORE, 140656834699264, 140656836808703,
+ERASE, 140656834699264, 140656836808703,
+STORE, 140656836796416, 140656836804607,
+STORE, 140656836804608, 140656836808703,
+STORE, 140725183389696, 140725183393791,
+STORE, 140725183377408, 140725183389695,
+STORE, 140656836788224, 140656836796415,
+STORE, 140656832331776, 140656834555903,
+SNULL, 140656833982463, 140656834555903,
+STORE, 140656832331776, 140656833982463,
+STORE, 140656833982464, 140656834555903,
+SNULL, 140656833982464, 140656834551807,
+STORE, 140656834551808, 140656834555903,
+STORE, 140656833982464, 140656834551807,
+ERASE, 140656833982464, 140656834551807,
+STORE, 140656833982464, 140656834551807,
+ERASE, 140656834551808, 140656834555903,
+STORE, 140656834551808, 140656834555903,
+STORE, 140656836763648, 140656836788223,
+STORE, 140656830070784, 140656832331775,
+SNULL, 140656830070784, 140656830222335,
+STORE, 140656830222336, 140656832331775,
+STORE, 140656830070784, 140656830222335,
+SNULL, 140656832315391, 140656832331775,
+STORE, 140656830222336, 140656832315391,
+STORE, 140656832315392, 140656832331775,
+SNULL, 140656832315392, 140656832323583,
+STORE, 140656832323584, 140656832331775,
+STORE, 140656832315392, 140656832323583,
+ERASE, 140656832315392, 140656832323583,
+STORE, 140656832315392, 140656832323583,
+ERASE, 140656832323584, 140656832331775,
+STORE, 140656832323584, 140656832331775,
+STORE, 140656827940864, 140656830070783,
+SNULL, 140656827940864, 140656827969535,
+STORE, 140656827969536, 140656830070783,
+STORE, 140656827940864, 140656827969535,
+SNULL, 140656830062591, 140656830070783,
+STORE, 140656827969536, 140656830062591,
+STORE, 140656830062592, 140656830070783,
+ERASE, 140656830062592, 140656830070783,
+STORE, 140656830062592, 140656830070783,
+STORE, 140656825724928, 140656827940863,
+SNULL, 140656825724928, 140656825823231,
+STORE, 140656825823232, 140656827940863,
+STORE, 140656825724928, 140656825823231,
+SNULL, 140656827916287, 140656827940863,
+STORE, 140656825823232, 140656827916287,
+STORE, 140656827916288, 140656827940863,
+SNULL, 140656827916288, 140656827924479,
+STORE, 140656827924480, 140656827940863,
+STORE, 140656827916288, 140656827924479,
+ERASE, 140656827916288, 140656827924479,
+STORE, 140656827916288, 140656827924479,
+ERASE, 140656827924480, 140656827940863,
+STORE, 140656827924480, 140656827940863,
+STORE, 140656821927936, 140656825724927,
+SNULL, 140656821927936, 140656823586815,
+STORE, 140656823586816, 140656825724927,
+STORE, 140656821927936, 140656823586815,
+SNULL, 140656825683967, 140656825724927,
+STORE, 140656823586816, 140656825683967,
+STORE, 140656825683968, 140656825724927,
+SNULL, 140656825683968, 140656825708543,
+STORE, 140656825708544, 140656825724927,
+STORE, 140656825683968, 140656825708543,
+ERASE, 140656825683968, 140656825708543,
+STORE, 140656825683968, 140656825708543,
+ERASE, 140656825708544, 140656825724927,
+STORE, 140656825708544, 140656825724927,
+STORE, 140656819806208, 140656821927935,
+SNULL, 140656819806208, 140656819822591,
+STORE, 140656819822592, 140656821927935,
+STORE, 140656819806208, 140656819822591,
+SNULL, 140656821919743, 140656821927935,
+STORE, 140656819822592, 140656821919743,
+STORE, 140656821919744, 140656821927935,
+ERASE, 140656821919744, 140656821927935,
+STORE, 140656821919744, 140656821927935,
+STORE, 140656836755456, 140656836763647,
+STORE, 140656817553408, 140656819806207,
+SNULL, 140656817553408, 140656817704959,
+STORE, 140656817704960, 140656819806207,
+STORE, 140656817553408, 140656817704959,
+SNULL, 140656819798015, 140656819806207,
+STORE, 140656817704960, 140656819798015,
+STORE, 140656819798016, 140656819806207,
+ERASE, 140656819798016, 140656819806207,
+STORE, 140656819798016, 140656819806207,
+STORE, 140656815382528, 140656817553407,
+SNULL, 140656815382528, 140656815452159,
+STORE, 140656815452160, 140656817553407,
+STORE, 140656815382528, 140656815452159,
+SNULL, 140656817545215, 140656817553407,
+STORE, 140656815452160, 140656817545215,
+STORE, 140656817545216, 140656817553407,
+ERASE, 140656817545216, 140656817553407,
+STORE, 140656817545216, 140656817553407,
+STORE, 140656812171264, 140656815382527,
+SNULL, 140656812171264, 140656813248511,
+STORE, 140656813248512, 140656815382527,
+STORE, 140656812171264, 140656813248511,
+SNULL, 140656815345663, 140656815382527,
+STORE, 140656813248512, 140656815345663,
+STORE, 140656815345664, 140656815382527,
+ERASE, 140656815345664, 140656815382527,
+STORE, 140656815345664, 140656815382527,
+STORE, 140656810037248, 140656812171263,
+SNULL, 140656810037248, 140656810065919,
+STORE, 140656810065920, 140656812171263,
+STORE, 140656810037248, 140656810065919,
+SNULL, 140656812163071, 140656812171263,
+STORE, 140656810065920, 140656812163071,
+STORE, 140656812163072, 140656812171263,
+ERASE, 140656812163072, 140656812171263,
+STORE, 140656812163072, 140656812171263,
+STORE, 140656807727104, 140656810037247,
+SNULL, 140656807727104, 140656807931903,
+STORE, 140656807931904, 140656810037247,
+STORE, 140656807727104, 140656807931903,
+SNULL, 140656810029055, 140656810037247,
+STORE, 140656807931904, 140656810029055,
+STORE, 140656810029056, 140656810037247,
+ERASE, 140656810029056, 140656810037247,
+STORE, 140656810029056, 140656810037247,
+STORE, 140656805343232, 140656807727103,
+SNULL, 140656805343232, 140656805535743,
+STORE, 140656805535744, 140656807727103,
+STORE, 140656805343232, 140656805535743,
+SNULL, 140656807628799, 140656807727103,
+STORE, 140656805535744, 140656807628799,
+STORE, 140656807628800, 140656807727103,
+ERASE, 140656807628800, 140656807727103,
+STORE, 140656807628800, 140656807727103,
+STORE, 140656836747264, 140656836763647,
+STORE, 140656802775040, 140656805343231,
+SNULL, 140656802775040, 140656803241983,
+STORE, 140656803241984, 140656805343231,
+STORE, 140656802775040, 140656803241983,
+SNULL, 140656805335039, 140656805343231,
+STORE, 140656803241984, 140656805335039,
+STORE, 140656805335040, 140656805343231,
+ERASE, 140656805335040, 140656805343231,
+STORE, 140656805335040, 140656805343231,
+STORE, 140656800661504, 140656802775039,
+SNULL, 140656800661504, 140656800673791,
+STORE, 140656800673792, 140656802775039,
+STORE, 140656800661504, 140656800673791,
+SNULL, 140656802766847, 140656802775039,
+STORE, 140656800673792, 140656802766847,
+STORE, 140656802766848, 140656802775039,
+ERASE, 140656802766848, 140656802775039,
+STORE, 140656802766848, 140656802775039,
+STORE, 140656798482432, 140656800661503,
+SNULL, 140656798482432, 140656798560255,
+STORE, 140656798560256, 140656800661503,
+STORE, 140656798482432, 140656798560255,
+SNULL, 140656800653311, 140656800661503,
+STORE, 140656798560256, 140656800653311,
+STORE, 140656800653312, 140656800661503,
+ERASE, 140656800653312, 140656800661503,
+STORE, 140656800653312, 140656800661503,
+STORE, 140656796364800, 140656798482431,
+SNULL, 140656796364800, 140656796381183,
+STORE, 140656796381184, 140656798482431,
+STORE, 140656796364800, 140656796381183,
+SNULL, 140656798474239, 140656798482431,
+STORE, 140656796381184, 140656798474239,
+STORE, 140656798474240, 140656798482431,
+ERASE, 140656798474240, 140656798482431,
+STORE, 140656798474240, 140656798482431,
+STORE, 140656836739072, 140656836763647,
+STORE, 140656836726784, 140656836763647,
+SNULL, 140656825700351, 140656825708543,
+STORE, 140656825683968, 140656825700351,
+STORE, 140656825700352, 140656825708543,
+SNULL, 140656798478335, 140656798482431,
+STORE, 140656798474240, 140656798478335,
+STORE, 140656798478336, 140656798482431,
+SNULL, 140656800657407, 140656800661503,
+STORE, 140656800653312, 140656800657407,
+STORE, 140656800657408, 140656800661503,
+SNULL, 140656802770943, 140656802775039,
+STORE, 140656802766848, 140656802770943,
+STORE, 140656802770944, 140656802775039,
+SNULL, 140656827920383, 140656827924479,
+STORE, 140656827916288, 140656827920383,
+STORE, 140656827920384, 140656827924479,
+SNULL, 140656805339135, 140656805343231,
+STORE, 140656805335040, 140656805339135,
+STORE, 140656805339136, 140656805343231,
+SNULL, 140656807723007, 140656807727103,
+STORE, 140656807628800, 140656807723007,
+STORE, 140656807723008, 140656807727103,
+SNULL, 140656810033151, 140656810037247,
+STORE, 140656810029056, 140656810033151,
+STORE, 140656810033152, 140656810037247,
+SNULL, 140656812167167, 140656812171263,
+STORE, 140656812163072, 140656812167167,
+STORE, 140656812167168, 140656812171263,
+SNULL, 140656815353855, 140656815382527,
+STORE, 140656815345664, 140656815353855,
+STORE, 140656815353856, 140656815382527,
+SNULL, 140656817549311, 140656817553407,
+STORE, 140656817545216, 140656817549311,
+STORE, 140656817549312, 140656817553407,
+SNULL, 140656819802111, 140656819806207,
+STORE, 140656819798016, 140656819802111,
+STORE, 140656819802112, 140656819806207,
+SNULL, 140656821923839, 140656821927935,
+STORE, 140656821919744, 140656821923839,
+STORE, 140656821923840, 140656821927935,
+SNULL, 140656830066687, 140656830070783,
+STORE, 140656830062592, 140656830066687,
+STORE, 140656830066688, 140656830070783,
+SNULL, 140656832319487, 140656832323583,
+STORE, 140656832315392, 140656832319487,
+STORE, 140656832319488, 140656832323583,
+SNULL, 140656834547711, 140656834551807,
+STORE, 140656833982464, 140656834547711,
+STORE, 140656834547712, 140656834551807,
+SNULL, 94730166759423, 94730166763519,
+STORE, 94730166751232, 94730166759423,
+STORE, 94730166759424, 94730166763519,
+SNULL, 140656836800511, 140656836804607,
+STORE, 140656836796416, 140656836800511,
+STORE, 140656836800512, 140656836804607,
+ERASE, 140656836763648, 140656836788223,
+STORE, 94730171318272, 94730171453439,
+STORE, 140656836784128, 140656836788223,
+STORE, 140656836780032, 140656836784127,
+STORE, 140656791920640, 140656796364799,
+STORE, 140656836775936, 140656836780031,
+STORE, 140656787476480, 140656791920639,
+STORE, 140656779083776, 140656787476479,
+SNULL, 140656779087871, 140656787476479,
+STORE, 140656779083776, 140656779087871,
+STORE, 140656779087872, 140656787476479,
+STORE, 140656836771840, 140656836775935,
+STORE, 140656774639616, 140656779083775,
+STORE, 140656766246912, 140656774639615,
+SNULL, 140656766251007, 140656774639615,
+STORE, 140656766246912, 140656766251007,
+STORE, 140656766251008, 140656774639615,
+ERASE, 140656791920640, 140656796364799,
+ERASE, 140656836780032, 140656836784127,
+ERASE, 140656787476480, 140656791920639,
+ERASE, 140656836775936, 140656836780031,
+STORE, 140656836780032, 140656836784127,
+STORE, 140656791920640, 140656796364799,
+STORE, 140656836775936, 140656836780031,
+STORE, 140656787476480, 140656791920639,
+ERASE, 140656774639616, 140656779083775,
+ };
+ unsigned long set20[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140735952392192, 140737488351231,
+SNULL, 140735952396287, 140737488351231,
+STORE, 140735952392192, 140735952396287,
+STORE, 140735952261120, 140735952396287,
+STORE, 94849008947200, 94849009414143,
+SNULL, 94849009364991, 94849009414143,
+STORE, 94849008947200, 94849009364991,
+STORE, 94849009364992, 94849009414143,
+ERASE, 94849009364992, 94849009414143,
+STORE, 94849009364992, 94849009414143,
+STORE, 140590397943808, 140590400196607,
+SNULL, 140590398087167, 140590400196607,
+STORE, 140590397943808, 140590398087167,
+STORE, 140590398087168, 140590400196607,
+ERASE, 140590398087168, 140590400196607,
+STORE, 140590400184320, 140590400192511,
+STORE, 140590400192512, 140590400196607,
+STORE, 140735952850944, 140735952855039,
+STORE, 140735952838656, 140735952850943,
+STORE, 140590400180224, 140590400184319,
+STORE, 140590400172032, 140590400180223,
+STORE, 140590395809792, 140590397943807,
+SNULL, 140590395809792, 140590395838463,
+STORE, 140590395838464, 140590397943807,
+STORE, 140590395809792, 140590395838463,
+SNULL, 140590397935615, 140590397943807,
+STORE, 140590395838464, 140590397935615,
+STORE, 140590397935616, 140590397943807,
+ERASE, 140590397935616, 140590397943807,
+STORE, 140590397935616, 140590397943807,
+STORE, 140590393425920, 140590395809791,
+SNULL, 140590393425920, 140590393692159,
+STORE, 140590393692160, 140590395809791,
+STORE, 140590393425920, 140590393692159,
+SNULL, 140590395785215, 140590395809791,
+STORE, 140590393692160, 140590395785215,
+STORE, 140590395785216, 140590395809791,
+SNULL, 140590395785216, 140590395805695,
+STORE, 140590395805696, 140590395809791,
+STORE, 140590395785216, 140590395805695,
+ERASE, 140590395785216, 140590395805695,
+STORE, 140590395785216, 140590395805695,
+ERASE, 140590395805696, 140590395809791,
+STORE, 140590395805696, 140590395809791,
+STORE, 140590391234560, 140590393425919,
+SNULL, 140590391234560, 140590391324671,
+STORE, 140590391324672, 140590393425919,
+STORE, 140590391234560, 140590391324671,
+SNULL, 140590393417727, 140590393425919,
+STORE, 140590391324672, 140590393417727,
+STORE, 140590393417728, 140590393425919,
+ERASE, 140590393417728, 140590393425919,
+STORE, 140590393417728, 140590393425919,
+STORE, 140590388973568, 140590391234559,
+SNULL, 140590388973568, 140590389125119,
+STORE, 140590389125120, 140590391234559,
+STORE, 140590388973568, 140590389125119,
+SNULL, 140590391218175, 140590391234559,
+STORE, 140590389125120, 140590391218175,
+STORE, 140590391218176, 140590391234559,
+SNULL, 140590391218176, 140590391226367,
+STORE, 140590391226368, 140590391234559,
+STORE, 140590391218176, 140590391226367,
+ERASE, 140590391218176, 140590391226367,
+STORE, 140590391218176, 140590391226367,
+ERASE, 140590391226368, 140590391234559,
+STORE, 140590391226368, 140590391234559,
+STORE, 140590386843648, 140590388973567,
+SNULL, 140590386843648, 140590386872319,
+STORE, 140590386872320, 140590388973567,
+STORE, 140590386843648, 140590386872319,
+SNULL, 140590388965375, 140590388973567,
+STORE, 140590386872320, 140590388965375,
+STORE, 140590388965376, 140590388973567,
+ERASE, 140590388965376, 140590388973567,
+STORE, 140590388965376, 140590388973567,
+STORE, 140590384627712, 140590386843647,
+SNULL, 140590384627712, 140590384726015,
+STORE, 140590384726016, 140590386843647,
+STORE, 140590384627712, 140590384726015,
+SNULL, 140590386819071, 140590386843647,
+STORE, 140590384726016, 140590386819071,
+STORE, 140590386819072, 140590386843647,
+SNULL, 140590386819072, 140590386827263,
+STORE, 140590386827264, 140590386843647,
+STORE, 140590386819072, 140590386827263,
+ERASE, 140590386819072, 140590386827263,
+STORE, 140590386819072, 140590386827263,
+ERASE, 140590386827264, 140590386843647,
+STORE, 140590386827264, 140590386843647,
+STORE, 140590400163840, 140590400180223,
+STORE, 140590380830720, 140590384627711,
+SNULL, 140590380830720, 140590382489599,
+STORE, 140590382489600, 140590384627711,
+STORE, 140590380830720, 140590382489599,
+SNULL, 140590384586751, 140590384627711,
+STORE, 140590382489600, 140590384586751,
+STORE, 140590384586752, 140590384627711,
+SNULL, 140590384586752, 140590384611327,
+STORE, 140590384611328, 140590384627711,
+STORE, 140590384586752, 140590384611327,
+ERASE, 140590384586752, 140590384611327,
+STORE, 140590384586752, 140590384611327,
+ERASE, 140590384611328, 140590384627711,
+STORE, 140590384611328, 140590384627711,
+STORE, 140590378713088, 140590380830719,
+SNULL, 140590378713088, 140590378729471,
+STORE, 140590378729472, 140590380830719,
+STORE, 140590378713088, 140590378729471,
+SNULL, 140590380822527, 140590380830719,
+STORE, 140590378729472, 140590380822527,
+STORE, 140590380822528, 140590380830719,
+ERASE, 140590380822528, 140590380830719,
+STORE, 140590380822528, 140590380830719,
+STORE, 140590376595456, 140590378713087,
+SNULL, 140590376595456, 140590376611839,
+STORE, 140590376611840, 140590378713087,
+STORE, 140590376595456, 140590376611839,
+SNULL, 140590378704895, 140590378713087,
+STORE, 140590376611840, 140590378704895,
+STORE, 140590378704896, 140590378713087,
+ERASE, 140590378704896, 140590378713087,
+STORE, 140590378704896, 140590378713087,
+STORE, 140590374027264, 140590376595455,
+SNULL, 140590374027264, 140590374494207,
+STORE, 140590374494208, 140590376595455,
+STORE, 140590374027264, 140590374494207,
+SNULL, 140590376587263, 140590376595455,
+STORE, 140590374494208, 140590376587263,
+STORE, 140590376587264, 140590376595455,
+ERASE, 140590376587264, 140590376595455,
+STORE, 140590376587264, 140590376595455,
+STORE, 140590371913728, 140590374027263,
+SNULL, 140590371913728, 140590371926015,
+STORE, 140590371926016, 140590374027263,
+STORE, 140590371913728, 140590371926015,
+SNULL, 140590374019071, 140590374027263,
+STORE, 140590371926016, 140590374019071,
+STORE, 140590374019072, 140590374027263,
+ERASE, 140590374019072, 140590374027263,
+STORE, 140590374019072, 140590374027263,
+STORE, 140590400155648, 140590400180223,
+STORE, 140590400143360, 140590400180223,
+SNULL, 140590384603135, 140590384611327,
+STORE, 140590384586752, 140590384603135,
+STORE, 140590384603136, 140590384611327,
+SNULL, 140590374023167, 140590374027263,
+STORE, 140590374019072, 140590374023167,
+STORE, 140590374023168, 140590374027263,
+SNULL, 140590386823167, 140590386827263,
+STORE, 140590386819072, 140590386823167,
+STORE, 140590386823168, 140590386827263,
+SNULL, 140590376591359, 140590376595455,
+ };
+ unsigned long set21[] = {
+STORE, 93874710941696, 93874711363583,
+STORE, 93874711367680, 93874711408639,
+STORE, 93874711408640, 93874711412735,
+STORE, 93874720989184, 93874721124351,
+STORE, 140708365086720, 140708365099007,
+STORE, 140708365099008, 140708367192063,
+STORE, 140708367192064, 140708367196159,
+STORE, 140708367196160, 140708367200255,
+STORE, 140708367200256, 140708367667199,
+STORE, 140708367667200, 140708369760255,
+STORE, 140708369760256, 140708369764351,
+STORE, 140708369764352, 140708369768447,
+STORE, 140708369768448, 140708369784831,
+STORE, 140708369784832, 140708371877887,
+STORE, 140708371877888, 140708371881983,
+STORE, 140708371881984, 140708371886079,
+STORE, 140708371886080, 140708371902463,
+STORE, 140708371902464, 140708373995519,
+STORE, 140708373995520, 140708373999615,
+STORE, 140708373999616, 140708374003711,
+STORE, 140708374003712, 140708375662591,
+STORE, 140708375662592, 140708377759743,
+STORE, 140708377759744, 140708377776127,
+STORE, 140708377776128, 140708377784319,
+STORE, 140708377784320, 140708377800703,
+STORE, 140708377800704, 140708377899007,
+STORE, 140708377899008, 140708379992063,
+STORE, 140708379992064, 140708379996159,
+STORE, 140708379996160, 140708380000255,
+STORE, 140708380000256, 140708380016639,
+STORE, 140708380016640, 140708380045311,
+STORE, 140708380045312, 140708382138367,
+STORE, 140708382138368, 140708382142463,
+STORE, 140708382142464, 140708382146559,
+STORE, 140708382146560, 140708382298111,
+STORE, 140708382298112, 140708384391167,
+STORE, 140708384391168, 140708384395263,
+STORE, 140708384395264, 140708384399359,
+STORE, 140708384399360, 140708384407551,
+STORE, 140708384407552, 140708384497663,
+STORE, 140708384497664, 140708386590719,
+STORE, 140708386590720, 140708386594815,
+STORE, 140708386594816, 140708386598911,
+STORE, 140708386598912, 140708386865151,
+STORE, 140708386865152, 140708388958207,
+STORE, 140708388958208, 140708388974591,
+STORE, 140708388974592, 140708388978687,
+STORE, 140708388978688, 140708388982783,
+STORE, 140708388982784, 140708389011455,
+STORE, 140708389011456, 140708391108607,
+STORE, 140708391108608, 140708391112703,
+STORE, 140708391112704, 140708391116799,
+STORE, 140708391116800, 140708391260159,
+STORE, 140708393291776, 140708393308159,
+STORE, 140708393308160, 140708393312255,
+STORE, 140708393312256, 140708393316351,
+STORE, 140708393316352, 140708393353215,
+STORE, 140708393353216, 140708393357311,
+STORE, 140708393357312, 140708393361407,
+STORE, 140708393361408, 140708393365503,
+STORE, 140708393365504, 140708393369599,
+STORE, 140730557042688, 140730557177855,
+STORE, 140730557235200, 140730557247487,
+STORE, 140730557247488, 140730557251583,
+ERASE, 140708393353216, 140708393357311,
+ERASE, 140708393312256, 140708393316351,
+ERASE, 140708393308160, 140708393312255,
+ERASE, 140708393291776, 140708393308159,
+ };
+ unsigned long set22[] = {
+STORE, 93951397134336, 93951397183487,
+STORE, 93951397183488, 93951397728255,
+STORE, 93951397728256, 93951397826559,
+STORE, 93951397826560, 93951397842943,
+STORE, 93951397842944, 93951397847039,
+STORE, 93951425974272, 93951426109439,
+STORE, 140685152665600, 140685152677887,
+STORE, 140685152677888, 140685152829439,
+STORE, 140685152829440, 140685154181119,
+STORE, 140685154181120, 140685154484223,
+STORE, 140685154484224, 140685154496511,
+STORE, 140685154496512, 140685154508799,
+STORE, 140685154508800, 140685154525183,
+STORE, 140685154525184, 140685154541567,
+STORE, 140685154541568, 140685154590719,
+STORE, 140685154590720, 140685154603007,
+STORE, 140685154603008, 140685154607103,
+STORE, 140685154607104, 140685154611199,
+STORE, 140685154611200, 140685154615295,
+STORE, 140685154615296, 140685154631679,
+STORE, 140685154639872, 140685154643967,
+STORE, 140685154643968, 140685154766847,
+STORE, 140685154766848, 140685154799615,
+STORE, 140685154803712, 140685154807807,
+STORE, 140685154807808, 140685154811903,
+STORE, 140685154811904, 140685154815999,
+STORE, 140722188902400, 140722189037567,
+STORE, 140722189512704, 140722189524991,
+STORE, 140722189524992, 140722189529087,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733429354496, 140737488351231,
+SNULL, 140733429358591, 140737488351231,
+STORE, 140733429354496, 140733429358591,
+STORE, 140733429223424, 140733429358591,
+STORE, 94526683537408, 94526683660287,
+SNULL, 94526683553791, 94526683660287,
+STORE, 94526683537408, 94526683553791,
+STORE, 94526683553792, 94526683660287,
+ERASE, 94526683553792, 94526683660287,
+STORE, 94526683553792, 94526683623423,
+STORE, 94526683623424, 94526683647999,
+STORE, 94526683652096, 94526683660287,
+STORE, 140551363747840, 140551363923967,
+SNULL, 140551363751935, 140551363923967,
+STORE, 140551363747840, 140551363751935,
+STORE, 140551363751936, 140551363923967,
+ERASE, 140551363751936, 140551363923967,
+STORE, 140551363751936, 140551363874815,
+STORE, 140551363874816, 140551363907583,
+STORE, 140551363911680, 140551363919871,
+STORE, 140551363919872, 140551363923967,
+STORE, 140733429690368, 140733429694463,
+STORE, 140733429678080, 140733429690367,
+STORE, 140551363739648, 140551363747839,
+STORE, 140551363731456, 140551363739647,
+STORE, 140551363379200, 140551363731455,
+SNULL, 140551363379200, 140551363420159,
+STORE, 140551363420160, 140551363731455,
+STORE, 140551363379200, 140551363420159,
+SNULL, 140551363706879, 140551363731455,
+STORE, 140551363420160, 140551363706879,
+STORE, 140551363706880, 140551363731455,
+SNULL, 140551363420160, 140551363637247,
+STORE, 140551363637248, 140551363706879,
+STORE, 140551363420160, 140551363637247,
+ERASE, 140551363420160, 140551363637247,
+STORE, 140551363420160, 140551363637247,
+SNULL, 140551363637248, 140551363702783,
+STORE, 140551363702784, 140551363706879,
+STORE, 140551363637248, 140551363702783,
+ERASE, 140551363637248, 140551363702783,
+STORE, 140551363637248, 140551363702783,
+ERASE, 140551363706880, 140551363731455,
+STORE, 140551363706880, 140551363731455,
+STORE, 140551361531904, 140551363379199,
+SNULL, 140551361683455, 140551363379199,
+STORE, 140551361531904, 140551361683455,
+STORE, 140551361683456, 140551363379199,
+SNULL, 140551361683456, 140551363035135,
+STORE, 140551363035136, 140551363379199,
+STORE, 140551361683456, 140551363035135,
+ERASE, 140551361683456, 140551363035135,
+STORE, 140551361683456, 140551363035135,
+SNULL, 140551363035136, 140551363338239,
+STORE, 140551363338240, 140551363379199,
+STORE, 140551363035136, 140551363338239,
+ERASE, 140551363035136, 140551363338239,
+STORE, 140551363035136, 140551363379199,
+SNULL, 140551363338239, 140551363379199,
+STORE, 140551363035136, 140551363338239,
+STORE, 140551363338240, 140551363379199,
+SNULL, 140551363338240, 140551363362815,
+STORE, 140551363362816, 140551363379199,
+STORE, 140551363338240, 140551363362815,
+ERASE, 140551363338240, 140551363362815,
+STORE, 140551363338240, 140551363362815,
+ERASE, 140551363362816, 140551363379199,
+STORE, 140551363362816, 140551363379199,
+STORE, 140551361519616, 140551361531903,
+SNULL, 140551363350527, 140551363362815,
+STORE, 140551363338240, 140551363350527,
+STORE, 140551363350528, 140551363362815,
+SNULL, 140551363727359, 140551363731455,
+STORE, 140551363706880, 140551363727359,
+STORE, 140551363727360, 140551363731455,
+SNULL, 94526683656191, 94526683660287,
+STORE, 94526683652096, 94526683656191,
+STORE, 94526683656192, 94526683660287,
+SNULL, 140551363915775, 140551363919871,
+STORE, 140551363911680, 140551363915775,
+STORE, 140551363915776, 140551363919871,
+ERASE, 140551363739648, 140551363747839,
+STORE, 94526715490304, 94526715625471,
+STORE, 140551361253376, 140551361531903,
+STORE, 140551360987136, 140551361531903,
+STORE, 140551360720896, 140551361531903,
+STORE, 140551360454656, 140551361531903,
+SNULL, 140551361253375, 140551361531903,
+STORE, 140551360454656, 140551361253375,
+STORE, 140551361253376, 140551361531903,
+SNULL, 140551361253376, 140551361519615,
+STORE, 140551361519616, 140551361531903,
+STORE, 140551361253376, 140551361519615,
+ERASE, 140551361253376, 140551361519615,
+ };
+
+ unsigned long set23[] = {
+STORE, 94014447943680, 94014448156671,
+STORE, 94014450253824, 94014450257919,
+STORE, 94014450257920, 94014450266111,
+STORE, 94014450266112, 94014450278399,
+STORE, 94014464225280, 94014464630783,
+STORE, 139761764306944, 139761765965823,
+STORE, 139761765965824, 139761768062975,
+STORE, 139761768062976, 139761768079359,
+STORE, 139761768079360, 139761768087551,
+STORE, 139761768087552, 139761768103935,
+STORE, 139761768103936, 139761768116223,
+STORE, 139761768116224, 139761770209279,
+STORE, 139761770209280, 139761770213375,
+STORE, 139761770213376, 139761770217471,
+STORE, 139761770217472, 139761770360831,
+STORE, 139761770729472, 139761772412927,
+STORE, 139761772412928, 139761772429311,
+STORE, 139761772457984, 139761772462079,
+STORE, 139761772462080, 139761772466175,
+STORE, 139761772466176, 139761772470271,
+STORE, 140724336517120, 140724336652287,
+STORE, 140724336955392, 140724336967679,
+STORE, 140724336967680, 140724336971775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140721840295936, 140737488351231,
+SNULL, 140721840300031, 140737488351231,
+STORE, 140721840295936, 140721840300031,
+STORE, 140721840164864, 140721840300031,
+STORE, 93937913667584, 93937915830271,
+SNULL, 93937913729023, 93937915830271,
+STORE, 93937913667584, 93937913729023,
+STORE, 93937913729024, 93937915830271,
+ERASE, 93937913729024, 93937915830271,
+STORE, 93937915822080, 93937915830271,
+STORE, 140598835335168, 140598837587967,
+SNULL, 140598835478527, 140598837587967,
+STORE, 140598835335168, 140598835478527,
+STORE, 140598835478528, 140598837587967,
+ERASE, 140598835478528, 140598837587967,
+STORE, 140598837575680, 140598837583871,
+STORE, 140598837583872, 140598837587967,
+STORE, 140721841086464, 140721841090559,
+STORE, 140721841074176, 140721841086463,
+STORE, 140598837547008, 140598837575679,
+STORE, 140598837538816, 140598837547007,
+STORE, 140598831538176, 140598835335167,
+SNULL, 140598831538176, 140598833197055,
+STORE, 140598833197056, 140598835335167,
+STORE, 140598831538176, 140598833197055,
+SNULL, 140598835294207, 140598835335167,
+STORE, 140598833197056, 140598835294207,
+STORE, 140598835294208, 140598835335167,
+SNULL, 140598835294208, 140598835318783,
+STORE, 140598835318784, 140598835335167,
+STORE, 140598835294208, 140598835318783,
+ERASE, 140598835294208, 140598835318783,
+STORE, 140598835294208, 140598835318783,
+ERASE, 140598835318784, 140598835335167,
+STORE, 140598835318784, 140598835335167,
+SNULL, 140598835310591, 140598835318783,
+STORE, 140598835294208, 140598835310591,
+STORE, 140598835310592, 140598835318783,
+SNULL, 93937915826175, 93937915830271,
+STORE, 93937915822080, 93937915826175,
+STORE, 93937915826176, 93937915830271,
+SNULL, 140598837579775, 140598837583871,
+STORE, 140598837575680, 140598837579775,
+STORE, 140598837579776, 140598837583871,
+ERASE, 140598837547008, 140598837575679,
+STORE, 93937929179136, 93937929314303,
+STORE, 140598835855360, 140598837538815,
+STORE, 140737488347136, 140737488351231,
+STORE, 140728187723776, 140737488351231,
+SNULL, 140728187727871, 140737488351231,
+STORE, 140728187723776, 140728187727871,
+STORE, 140728187592704, 140728187727871,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 140583951437824, 140583953690623,
+SNULL, 140583951581183, 140583953690623,
+STORE, 140583951437824, 140583951581183,
+STORE, 140583951581184, 140583953690623,
+ERASE, 140583951581184, 140583953690623,
+STORE, 140583953678336, 140583953686527,
+STORE, 140583953686528, 140583953690623,
+STORE, 140728189116416, 140728189120511,
+STORE, 140728189104128, 140728189116415,
+STORE, 140583953649664, 140583953678335,
+STORE, 140583953641472, 140583953649663,
+STORE, 140583948275712, 140583951437823,
+SNULL, 140583948275712, 140583949336575,
+STORE, 140583949336576, 140583951437823,
+STORE, 140583948275712, 140583949336575,
+SNULL, 140583951429631, 140583951437823,
+STORE, 140583949336576, 140583951429631,
+STORE, 140583951429632, 140583951437823,
+ERASE, 140583951429632, 140583951437823,
+STORE, 140583951429632, 140583951437823,
+STORE, 140583944478720, 140583948275711,
+SNULL, 140583944478720, 140583946137599,
+STORE, 140583946137600, 140583948275711,
+STORE, 140583944478720, 140583946137599,
+SNULL, 140583948234751, 140583948275711,
+STORE, 140583946137600, 140583948234751,
+STORE, 140583948234752, 140583948275711,
+SNULL, 140583948234752, 140583948259327,
+STORE, 140583948259328, 140583948275711,
+STORE, 140583948234752, 140583948259327,
+ERASE, 140583948234752, 140583948259327,
+STORE, 140583948234752, 140583948259327,
+ERASE, 140583948259328, 140583948275711,
+STORE, 140583948259328, 140583948275711,
+STORE, 140583953629184, 140583953649663,
+SNULL, 140583948251135, 140583948259327,
+STORE, 140583948234752, 140583948251135,
+STORE, 140583948251136, 140583948259327,
+SNULL, 140583951433727, 140583951437823,
+STORE, 140583951429632, 140583951433727,
+STORE, 140583951433728, 140583951437823,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 140583953682431, 140583953686527,
+STORE, 140583953678336, 140583953682431,
+STORE, 140583953682432, 140583953686527,
+ERASE, 140583953649664, 140583953678335,
+STORE, 17821696, 17956863,
+STORE, 17821696, 18104319,
+STORE, 140583951945728, 140583953629183,
+STORE, 94014447943680, 94014448156671,
+STORE, 94014450253824, 94014450257919,
+STORE, 94014450257920, 94014450266111,
+STORE, 94014450266112, 94014450278399,
+STORE, 94014464225280, 94014465196031,
+STORE, 139761764306944, 139761765965823,
+STORE, 139761765965824, 139761768062975,
+STORE, 139761768062976, 139761768079359,
+STORE, 139761768079360, 139761768087551,
+STORE, 139761768087552, 139761768103935,
+STORE, 139761768103936, 139761768116223,
+STORE, 139761768116224, 139761770209279,
+STORE, 139761770209280, 139761770213375,
+STORE, 139761770213376, 139761770217471,
+STORE, 139761770217472, 139761770360831,
+STORE, 139761770729472, 139761772412927,
+STORE, 139761772412928, 139761772429311,
+STORE, 139761772457984, 139761772462079,
+STORE, 139761772462080, 139761772466175,
+STORE, 139761772466176, 139761772470271,
+STORE, 140724336517120, 140724336652287,
+STORE, 140724336955392, 140724336967679,
+STORE, 140724336967680, 140724336971775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140726063296512, 140737488351231,
+SNULL, 140726063300607, 140737488351231,
+STORE, 140726063296512, 140726063300607,
+STORE, 140726063165440, 140726063300607,
+STORE, 94016795934720, 94016798158847,
+SNULL, 94016796045311, 94016798158847,
+STORE, 94016795934720, 94016796045311,
+STORE, 94016796045312, 94016798158847,
+ERASE, 94016796045312, 94016798158847,
+STORE, 94016798138368, 94016798150655,
+STORE, 94016798150656, 94016798158847,
+STORE, 139975915966464, 139975918219263,
+SNULL, 139975916109823, 139975918219263,
+STORE, 139975915966464, 139975916109823,
+STORE, 139975916109824, 139975918219263,
+ERASE, 139975916109824, 139975918219263,
+STORE, 139975918206976, 139975918215167,
+STORE, 139975918215168, 139975918219263,
+STORE, 140726064541696, 140726064545791,
+STORE, 140726064529408, 140726064541695,
+STORE, 139975918178304, 139975918206975,
+STORE, 139975918170112, 139975918178303,
+STORE, 139975912169472, 139975915966463,
+SNULL, 139975912169472, 139975913828351,
+STORE, 139975913828352, 139975915966463,
+STORE, 139975912169472, 139975913828351,
+SNULL, 139975915925503, 139975915966463,
+STORE, 139975913828352, 139975915925503,
+STORE, 139975915925504, 139975915966463,
+SNULL, 139975915925504, 139975915950079,
+STORE, 139975915950080, 139975915966463,
+STORE, 139975915925504, 139975915950079,
+ERASE, 139975915925504, 139975915950079,
+STORE, 139975915925504, 139975915950079,
+ERASE, 139975915950080, 139975915966463,
+STORE, 139975915950080, 139975915966463,
+SNULL, 139975915941887, 139975915950079,
+STORE, 139975915925504, 139975915941887,
+STORE, 139975915941888, 139975915950079,
+SNULL, 94016798146559, 94016798150655,
+STORE, 94016798138368, 94016798146559,
+STORE, 94016798146560, 94016798150655,
+SNULL, 139975918211071, 139975918215167,
+STORE, 139975918206976, 139975918211071,
+STORE, 139975918211072, 139975918215167,
+ERASE, 139975918178304, 139975918206975,
+STORE, 94016804925440, 94016805060607,
+STORE, 94596177661952, 94596177772543,
+STORE, 94596179865600, 94596179873791,
+STORE, 94596179873792, 94596179877887,
+STORE, 94596179877888, 94596179886079,
+STORE, 94596211597312, 94596211863551,
+STORE, 140127351840768, 140127353499647,
+STORE, 140127353499648, 140127355596799,
+STORE, 140127355596800, 140127355613183,
+STORE, 140127355613184, 140127355621375,
+STORE, 140127355621376, 140127355637759,
+STORE, 140127355637760, 140127355781119,
+STORE, 140127357841408, 140127357849599,
+STORE, 140127357878272, 140127357882367,
+STORE, 140127357882368, 140127357886463,
+STORE, 140127357886464, 140127357890559,
+STORE, 140726167252992, 140726167392255,
+STORE, 140726167838720, 140726167851007,
+STORE, 140726167851008, 140726167855103,
+STORE, 140737488347136, 140737488351231,
+STORE, 140731874017280, 140737488351231,
+SNULL, 140731874021375, 140737488351231,
+STORE, 140731874017280, 140731874021375,
+STORE, 140731873886208, 140731874021375,
+STORE, 94178682265600, 94178684489727,
+SNULL, 94178682376191, 94178684489727,
+STORE, 94178682265600, 94178682376191,
+STORE, 94178682376192, 94178684489727,
+ERASE, 94178682376192, 94178684489727,
+STORE, 94178684469248, 94178684481535,
+STORE, 94178684481536, 94178684489727,
+STORE, 140460853403648, 140460855656447,
+SNULL, 140460853547007, 140460855656447,
+STORE, 140460853403648, 140460853547007,
+STORE, 140460853547008, 140460855656447,
+ERASE, 140460853547008, 140460855656447,
+STORE, 140460855644160, 140460855652351,
+STORE, 140460855652352, 140460855656447,
+STORE, 140731874103296, 140731874107391,
+STORE, 140731874091008, 140731874103295,
+STORE, 140460855615488, 140460855644159,
+STORE, 140460855607296, 140460855615487,
+STORE, 140460849606656, 140460853403647,
+SNULL, 140460849606656, 140460851265535,
+STORE, 140460851265536, 140460853403647,
+STORE, 140460849606656, 140460851265535,
+SNULL, 140460853362687, 140460853403647,
+STORE, 140460851265536, 140460853362687,
+STORE, 140460853362688, 140460853403647,
+SNULL, 140460853362688, 140460853387263,
+STORE, 140460853387264, 140460853403647,
+STORE, 140460853362688, 140460853387263,
+ERASE, 140460853362688, 140460853387263,
+STORE, 140460853362688, 140460853387263,
+ERASE, 140460853387264, 140460853403647,
+STORE, 140460853387264, 140460853403647,
+SNULL, 140460853379071, 140460853387263,
+STORE, 140460853362688, 140460853379071,
+STORE, 140460853379072, 140460853387263,
+SNULL, 94178684477439, 94178684481535,
+STORE, 94178684469248, 94178684477439,
+STORE, 94178684477440, 94178684481535,
+SNULL, 140460855648255, 140460855652351,
+STORE, 140460855644160, 140460855648255,
+STORE, 140460855648256, 140460855652351,
+ERASE, 140460855615488, 140460855644159,
+STORE, 94178692063232, 94178692198399,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733096603648, 140737488351231,
+SNULL, 140733096611839, 140737488351231,
+STORE, 140733096603648, 140733096611839,
+STORE, 140733096472576, 140733096611839,
+STORE, 94796716122112, 94796718325759,
+SNULL, 94796716224511, 94796718325759,
+STORE, 94796716122112, 94796716224511,
+STORE, 94796716224512, 94796718325759,
+ERASE, 94796716224512, 94796718325759,
+STORE, 94796718317568, 94796718325759,
+STORE, 139667892793344, 139667895046143,
+SNULL, 139667892936703, 139667895046143,
+STORE, 139667892793344, 139667892936703,
+STORE, 139667892936704, 139667895046143,
+ERASE, 139667892936704, 139667895046143,
+STORE, 139667895033856, 139667895042047,
+STORE, 139667895042048, 139667895046143,
+STORE, 140733096857600, 140733096861695,
+STORE, 140733096845312, 140733096857599,
+STORE, 139667895005184, 139667895033855,
+STORE, 139667894996992, 139667895005183,
+STORE, 139667890532352, 139667892793343,
+SNULL, 139667890532352, 139667890683903,
+STORE, 139667890683904, 139667892793343,
+STORE, 139667890532352, 139667890683903,
+SNULL, 139667892776959, 139667892793343,
+STORE, 139667890683904, 139667892776959,
+STORE, 139667892776960, 139667892793343,
+SNULL, 139667892776960, 139667892785151,
+STORE, 139667892785152, 139667892793343,
+STORE, 139667892776960, 139667892785151,
+ERASE, 139667892776960, 139667892785151,
+STORE, 139667892776960, 139667892785151,
+ERASE, 139667892785152, 139667892793343,
+STORE, 139667892785152, 139667892793343,
+STORE, 139667886735360, 139667890532351,
+SNULL, 139667886735360, 139667888394239,
+STORE, 139667888394240, 139667890532351,
+STORE, 139667886735360, 139667888394239,
+SNULL, 139667890491391, 139667890532351,
+STORE, 139667888394240, 139667890491391,
+STORE, 139667890491392, 139667890532351,
+SNULL, 139667890491392, 139667890515967,
+STORE, 139667890515968, 139667890532351,
+STORE, 139667890491392, 139667890515967,
+ERASE, 139667890491392, 139667890515967,
+STORE, 139667890491392, 139667890515967,
+ERASE, 139667890515968, 139667890532351,
+STORE, 139667890515968, 139667890532351,
+STORE, 139667884167168, 139667886735359,
+SNULL, 139667884167168, 139667884634111,
+STORE, 139667884634112, 139667886735359,
+STORE, 139667884167168, 139667884634111,
+SNULL, 139667886727167, 139667886735359,
+STORE, 139667884634112, 139667886727167,
+STORE, 139667886727168, 139667886735359,
+ERASE, 139667886727168, 139667886735359,
+STORE, 139667886727168, 139667886735359,
+STORE, 139667882053632, 139667884167167,
+SNULL, 139667882053632, 139667882065919,
+STORE, 139667882065920, 139667884167167,
+STORE, 139667882053632, 139667882065919,
+SNULL, 139667884158975, 139667884167167,
+STORE, 139667882065920, 139667884158975,
+STORE, 139667884158976, 139667884167167,
+ERASE, 139667884158976, 139667884167167,
+STORE, 139667884158976, 139667884167167,
+STORE, 139667879837696, 139667882053631,
+SNULL, 139667879837696, 139667879935999,
+STORE, 139667879936000, 139667882053631,
+STORE, 139667879837696, 139667879935999,
+SNULL, 139667882029055, 139667882053631,
+STORE, 139667879936000, 139667882029055,
+STORE, 139667882029056, 139667882053631,
+SNULL, 139667882029056, 139667882037247,
+STORE, 139667882037248, 139667882053631,
+STORE, 139667882029056, 139667882037247,
+ERASE, 139667882029056, 139667882037247,
+STORE, 139667882029056, 139667882037247,
+ERASE, 139667882037248, 139667882053631,
+STORE, 139667882037248, 139667882053631,
+STORE, 139667894988800, 139667895005183,
+SNULL, 139667890507775, 139667890515967,
+STORE, 139667890491392, 139667890507775,
+STORE, 139667890507776, 139667890515967,
+SNULL, 139667882033151, 139667882037247,
+STORE, 139667882029056, 139667882033151,
+STORE, 139667882033152, 139667882037247,
+SNULL, 139667884163071, 139667884167167,
+STORE, 139667884158976, 139667884163071,
+STORE, 139667884163072, 139667884167167,
+SNULL, 139667886731263, 139667886735359,
+STORE, 139667886727168, 139667886731263,
+STORE, 139667886731264, 139667886735359,
+SNULL, 139667892781055, 139667892785151,
+STORE, 139667892776960, 139667892781055,
+STORE, 139667892781056, 139667892785151,
+SNULL, 94796718321663, 94796718325759,
+STORE, 94796718317568, 94796718321663,
+STORE, 94796718321664, 94796718325759,
+SNULL, 139667895037951, 139667895042047,
+STORE, 139667895033856, 139667895037951,
+STORE, 139667895037952, 139667895042047,
+ERASE, 139667895005184, 139667895033855,
+STORE, 94796726063104, 94796726198271,
+STORE, 139667893305344, 139667894988799,
+STORE, 139667895005184, 139667895033855,
+STORE, 94796726063104, 94796726333439,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722489507840, 140737488351231,
+SNULL, 140722489516031, 140737488351231,
+STORE, 140722489507840, 140722489516031,
+STORE, 140722489376768, 140722489516031,
+STORE, 93980993265664, 93980995489791,
+SNULL, 93980993376255, 93980995489791,
+STORE, 93980993265664, 93980993376255,
+STORE, 93980993376256, 93980995489791,
+ERASE, 93980993376256, 93980995489791,
+STORE, 93980995469312, 93980995481599,
+STORE, 93980995481600, 93980995489791,
+STORE, 140261313593344, 140261315846143,
+SNULL, 140261313736703, 140261315846143,
+STORE, 140261313593344, 140261313736703,
+STORE, 140261313736704, 140261315846143,
+ERASE, 140261313736704, 140261315846143,
+STORE, 140261315833856, 140261315842047,
+STORE, 140261315842048, 140261315846143,
+STORE, 140722489675776, 140722489679871,
+STORE, 140722489663488, 140722489675775,
+STORE, 140261315805184, 140261315833855,
+STORE, 140261315796992, 140261315805183,
+STORE, 140261309796352, 140261313593343,
+SNULL, 140261309796352, 140261311455231,
+STORE, 140261311455232, 140261313593343,
+STORE, 140261309796352, 140261311455231,
+SNULL, 140261313552383, 140261313593343,
+STORE, 140261311455232, 140261313552383,
+STORE, 140261313552384, 140261313593343,
+SNULL, 140261313552384, 140261313576959,
+STORE, 140261313576960, 140261313593343,
+STORE, 140261313552384, 140261313576959,
+ERASE, 140261313552384, 140261313576959,
+STORE, 140261313552384, 140261313576959,
+ERASE, 140261313576960, 140261313593343,
+STORE, 140261313576960, 140261313593343,
+SNULL, 140261313568767, 140261313576959,
+STORE, 140261313552384, 140261313568767,
+STORE, 140261313568768, 140261313576959,
+SNULL, 93980995477503, 93980995481599,
+STORE, 93980995469312, 93980995477503,
+STORE, 93980995477504, 93980995481599,
+SNULL, 140261315837951, 140261315842047,
+STORE, 140261315833856, 140261315837951,
+STORE, 140261315837952, 140261315842047,
+ERASE, 140261315805184, 140261315833855,
+STORE, 93980997443584, 93980997578751,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140737488338944, 140737488351231,
+STORE, 140734059450368, 140737488351231,
+SNULL, 140734059462655, 140737488351231,
+STORE, 140734059450368, 140734059462655,
+STORE, 140734059319296, 140734059462655,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 140307554983936, 140307557236735,
+SNULL, 140307555127295, 140307557236735,
+STORE, 140307554983936, 140307555127295,
+STORE, 140307555127296, 140307557236735,
+ERASE, 140307555127296, 140307557236735,
+STORE, 140307557224448, 140307557232639,
+STORE, 140307557232640, 140307557236735,
+STORE, 140734059483136, 140734059487231,
+STORE, 140734059470848, 140734059483135,
+STORE, 140307557195776, 140307557224447,
+STORE, 140307557187584, 140307557195775,
+STORE, 140307551821824, 140307554983935,
+SNULL, 140307551821824, 140307552882687,
+STORE, 140307552882688, 140307554983935,
+STORE, 140307551821824, 140307552882687,
+SNULL, 140307554975743, 140307554983935,
+STORE, 140307552882688, 140307554975743,
+STORE, 140307554975744, 140307554983935,
+ERASE, 140307554975744, 140307554983935,
+STORE, 140307554975744, 140307554983935,
+STORE, 140307548024832, 140307551821823,
+SNULL, 140307548024832, 140307549683711,
+STORE, 140307549683712, 140307551821823,
+STORE, 140307548024832, 140307549683711,
+SNULL, 140307551780863, 140307551821823,
+STORE, 140307549683712, 140307551780863,
+STORE, 140307551780864, 140307551821823,
+SNULL, 140307551780864, 140307551805439,
+STORE, 140307551805440, 140307551821823,
+STORE, 140307551780864, 140307551805439,
+ERASE, 140307551780864, 140307551805439,
+STORE, 140307551780864, 140307551805439,
+ERASE, 140307551805440, 140307551821823,
+STORE, 140307551805440, 140307551821823,
+STORE, 140307557175296, 140307557195775,
+SNULL, 140307551797247, 140307551805439,
+STORE, 140307551780864, 140307551797247,
+STORE, 140307551797248, 140307551805439,
+SNULL, 140307554979839, 140307554983935,
+STORE, 140307554975744, 140307554979839,
+STORE, 140307554979840, 140307554983935,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 140307557228543, 140307557232639,
+STORE, 140307557224448, 140307557228543,
+STORE, 140307557228544, 140307557232639,
+ERASE, 140307557195776, 140307557224447,
+STORE, 39698432, 39833599,
+STORE, 39698432, 39981055,
+STORE, 94306485321728, 94306485432319,
+STORE, 94306487525376, 94306487533567,
+STORE, 94306487533568, 94306487537663,
+STORE, 94306487537664, 94306487545855,
+STORE, 94306488868864, 94306489004031,
+STORE, 140497673998336, 140497675657215,
+STORE, 140497675657216, 140497677754367,
+STORE, 140497677754368, 140497677770751,
+STORE, 140497677770752, 140497677778943,
+STORE, 140497677778944, 140497677795327,
+STORE, 140497677795328, 140497677938687,
+STORE, 140497679998976, 140497680007167,
+STORE, 140497680035840, 140497680039935,
+STORE, 140497680039936, 140497680044031,
+STORE, 140497680044032, 140497680048127,
+STORE, 140732780462080, 140732780601343,
+STORE, 140732782239744, 140732782252031,
+STORE, 140732782252032, 140732782256127,
+STORE, 94236915900416, 94236916011007,
+STORE, 94236918104064, 94236918112255,
+STORE, 94236918112256, 94236918116351,
+STORE, 94236918116352, 94236918124543,
+STORE, 94236939489280, 94236939624447,
+STORE, 140046091743232, 140046093402111,
+STORE, 140046093402112, 140046095499263,
+STORE, 140046095499264, 140046095515647,
+STORE, 140046095515648, 140046095523839,
+STORE, 140046095523840, 140046095540223,
+STORE, 140046095540224, 140046095683583,
+STORE, 140046097743872, 140046097752063,
+STORE, 140046097780736, 140046097784831,
+STORE, 140046097784832, 140046097788927,
+STORE, 140046097788928, 140046097793023,
+STORE, 140726694449152, 140726694588415,
+STORE, 140726695313408, 140726695325695,
+STORE, 140726695325696, 140726695329791,
+STORE, 94894582779904, 94894582992895,
+STORE, 94894585090048, 94894585094143,
+STORE, 94894585094144, 94894585102335,
+STORE, 94894585102336, 94894585114623,
+STORE, 94894592868352, 94894594293759,
+STORE, 139733563842560, 139733565501439,
+STORE, 139733565501440, 139733567598591,
+STORE, 139733567598592, 139733567614975,
+STORE, 139733567614976, 139733567623167,
+STORE, 139733567623168, 139733567639551,
+STORE, 139733567639552, 139733567651839,
+STORE, 139733567651840, 139733569744895,
+STORE, 139733569744896, 139733569748991,
+STORE, 139733569748992, 139733569753087,
+STORE, 139733569753088, 139733569896447,
+STORE, 139733570265088, 139733571948543,
+STORE, 139733571948544, 139733571964927,
+STORE, 139733571993600, 139733571997695,
+STORE, 139733571997696, 139733572001791,
+STORE, 139733572001792, 139733572005887,
+STORE, 140726369255424, 140726369394687,
+STORE, 140726370402304, 140726370414591,
+STORE, 140726370414592, 140726370418687,
+STORE, 94899236483072, 94899236696063,
+STORE, 94899238793216, 94899238797311,
+STORE, 94899238797312, 94899238805503,
+STORE, 94899238805504, 94899238817791,
+STORE, 94899263045632, 94899263979519,
+STORE, 140040959893504, 140040961552383,
+STORE, 140040961552384, 140040963649535,
+STORE, 140040963649536, 140040963665919,
+STORE, 140040963665920, 140040963674111,
+STORE, 140040963674112, 140040963690495,
+STORE, 140040963690496, 140040963702783,
+STORE, 140040963702784, 140040965795839,
+STORE, 140040965795840, 140040965799935,
+STORE, 140040965799936, 140040965804031,
+STORE, 140040965804032, 140040965947391,
+STORE, 140040966316032, 140040967999487,
+STORE, 140040967999488, 140040968015871,
+STORE, 140040968044544, 140040968048639,
+STORE, 140040968048640, 140040968052735,
+STORE, 140040968052736, 140040968056831,
+STORE, 140729921359872, 140729921499135,
+STORE, 140729921613824, 140729921626111,
+STORE, 140729921626112, 140729921630207,
+STORE, 94818265190400, 94818265403391,
+STORE, 94818267500544, 94818267504639,
+STORE, 94818267504640, 94818267512831,
+STORE, 94818267512832, 94818267525119,
+STORE, 94818283372544, 94818285858815,
+STORE, 139818425675776, 139818427334655,
+STORE, 139818427334656, 139818429431807,
+STORE, 139818429431808, 139818429448191,
+STORE, 139818429448192, 139818429456383,
+STORE, 139818429456384, 139818429472767,
+STORE, 139818429472768, 139818429485055,
+STORE, 139818429485056, 139818431578111,
+STORE, 139818431578112, 139818431582207,
+STORE, 139818431582208, 139818431586303,
+STORE, 139818431586304, 139818431729663,
+STORE, 139818432098304, 139818433781759,
+STORE, 139818433781760, 139818433798143,
+STORE, 139818433826816, 139818433830911,
+STORE, 139818433830912, 139818433835007,
+STORE, 139818433835008, 139818433839103,
+STORE, 140726170509312, 140726170648575,
+STORE, 140726171824128, 140726171836415,
+STORE, 140726171836416, 140726171840511,
+STORE, 94611513188352, 94611513401343,
+STORE, 94611515498496, 94611515502591,
+STORE, 94611515502592, 94611515510783,
+STORE, 94611515510784, 94611515523071,
+STORE, 94611516502016, 94611516907519,
+STORE, 140596246388736, 140596248047615,
+STORE, 140596248047616, 140596250144767,
+STORE, 140596250144768, 140596250161151,
+STORE, 140596250161152, 140596250169343,
+STORE, 140596250169344, 140596250185727,
+STORE, 140596250185728, 140596250198015,
+STORE, 140596250198016, 140596252291071,
+STORE, 140596252291072, 140596252295167,
+STORE, 140596252295168, 140596252299263,
+STORE, 140596252299264, 140596252442623,
+STORE, 140596252811264, 140596254494719,
+STORE, 140596254494720, 140596254511103,
+STORE, 140596254539776, 140596254543871,
+STORE, 140596254543872, 140596254547967,
+STORE, 140596254547968, 140596254552063,
+STORE, 140731551338496, 140731551477759,
+STORE, 140731551780864, 140731551793151,
+STORE, 140731551793152, 140731551797247,
+STORE, 94313835851776, 94313836064767,
+STORE, 94313838161920, 94313838166015,
+STORE, 94313838166016, 94313838174207,
+STORE, 94313838174208, 94313838186495,
+STORE, 94313858416640, 94313861906431,
+STORE, 140693503918080, 140693505576959,
+STORE, 140693505576960, 140693507674111,
+STORE, 140693507674112, 140693507690495,
+STORE, 140693507690496, 140693507698687,
+STORE, 140693507698688, 140693507715071,
+STORE, 140693507715072, 140693507727359,
+STORE, 140693507727360, 140693509820415,
+STORE, 140693509820416, 140693509824511,
+STORE, 140693509824512, 140693509828607,
+STORE, 140693509828608, 140693509971967,
+STORE, 140693510340608, 140693512024063,
+STORE, 140693512024064, 140693512040447,
+STORE, 140693512069120, 140693512073215,
+STORE, 140693512073216, 140693512077311,
+STORE, 140693512077312, 140693512081407,
+STORE, 140721116065792, 140721116205055,
+STORE, 140721117831168, 140721117843455,
+STORE, 140721117843456, 140721117847551,
+STORE, 94843650150400, 94843650363391,
+STORE, 94843652460544, 94843652464639,
+STORE, 94843652464640, 94843652472831,
+STORE, 94843652472832, 94843652485119,
+STORE, 94843685388288, 94843686281215,
+STORE, 140484193681408, 140484195340287,
+STORE, 140484195340288, 140484197437439,
+STORE, 140484197437440, 140484197453823,
+STORE, 140484197453824, 140484197462015,
+STORE, 140484197462016, 140484197478399,
+STORE, 140484197478400, 140484197490687,
+STORE, 140484197490688, 140484199583743,
+STORE, 140484199583744, 140484199587839,
+STORE, 140484199587840, 140484199591935,
+STORE, 140484199591936, 140484199735295,
+STORE, 140484200103936, 140484201787391,
+STORE, 140484201787392, 140484201803775,
+STORE, 140484201832448, 140484201836543,
+STORE, 140484201836544, 140484201840639,
+STORE, 140484201840640, 140484201844735,
+STORE, 140726294315008, 140726294454271,
+STORE, 140726295646208, 140726295658495,
+STORE, 140726295658496, 140726295662591,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140720422371328, 140737488351231,
+SNULL, 140720422379519, 140737488351231,
+STORE, 140720422371328, 140720422379519,
+STORE, 140720422240256, 140720422379519,
+STORE, 94417967845376, 94417970180095,
+SNULL, 94417968058367, 94417970180095,
+STORE, 94417967845376, 94417968058367,
+STORE, 94417968058368, 94417970180095,
+ERASE, 94417968058368, 94417970180095,
+STORE, 94417970155520, 94417970167807,
+STORE, 94417970167808, 94417970180095,
+STORE, 140252450045952, 140252452298751,
+SNULL, 140252450189311, 140252452298751,
+STORE, 140252450045952, 140252450189311,
+STORE, 140252450189312, 140252452298751,
+ERASE, 140252450189312, 140252452298751,
+STORE, 140252452286464, 140252452294655,
+STORE, 140252452294656, 140252452298751,
+STORE, 140720422416384, 140720422420479,
+STORE, 140720422404096, 140720422416383,
+STORE, 140252452257792, 140252452286463,
+STORE, 140252452249600, 140252452257791,
+STORE, 140252447932416, 140252450045951,
+SNULL, 140252447932416, 140252447944703,
+STORE, 140252447944704, 140252450045951,
+STORE, 140252447932416, 140252447944703,
+SNULL, 140252450037759, 140252450045951,
+STORE, 140252447944704, 140252450037759,
+STORE, 140252450037760, 140252450045951,
+ERASE, 140252450037760, 140252450045951,
+STORE, 140252450037760, 140252450045951,
+STORE, 140252444135424, 140252447932415,
+SNULL, 140252444135424, 140252445794303,
+STORE, 140252445794304, 140252447932415,
+STORE, 140252444135424, 140252445794303,
+SNULL, 140252447891455, 140252447932415,
+STORE, 140252445794304, 140252447891455,
+STORE, 140252447891456, 140252447932415,
+SNULL, 140252447891456, 140252447916031,
+STORE, 140252447916032, 140252447932415,
+STORE, 140252447891456, 140252447916031,
+ERASE, 140252447891456, 140252447916031,
+STORE, 140252447891456, 140252447916031,
+ERASE, 140252447916032, 140252447932415,
+STORE, 140252447916032, 140252447932415,
+STORE, 140252452241408, 140252452257791,
+SNULL, 140252447907839, 140252447916031,
+STORE, 140252447891456, 140252447907839,
+STORE, 140252447907840, 140252447916031,
+SNULL, 140252450041855, 140252450045951,
+STORE, 140252450037760, 140252450041855,
+STORE, 140252450041856, 140252450045951,
+SNULL, 94417970159615, 94417970167807,
+STORE, 94417970155520, 94417970159615,
+STORE, 94417970159616, 94417970167807,
+SNULL, 140252452290559, 140252452294655,
+STORE, 140252452286464, 140252452290559,
+STORE, 140252452290560, 140252452294655,
+ERASE, 140252452257792, 140252452286463,
+STORE, 94417996333056, 94417996468223,
+STORE, 140252450557952, 140252452241407,
+STORE, 94417996333056, 94417996603391,
+STORE, 94417996333056, 94417996738559,
+STORE, 94417996333056, 94417996910591,
+SNULL, 94417996881919, 94417996910591,
+STORE, 94417996333056, 94417996881919,
+STORE, 94417996881920, 94417996910591,
+ERASE, 94417996881920, 94417996910591,
+STORE, 94417996333056, 94417997017087,
+STORE, 94417996333056, 94417997152255,
+SNULL, 94417997135871, 94417997152255,
+STORE, 94417996333056, 94417997135871,
+STORE, 94417997135872, 94417997152255,
+ERASE, 94417997135872, 94417997152255,
+STORE, 94417996333056, 94417997291519,
+SNULL, 94417997271039, 94417997291519,
+STORE, 94417996333056, 94417997271039,
+STORE, 94417997271040, 94417997291519,
+ERASE, 94417997271040, 94417997291519,
+STORE, 94417996333056, 94417997406207,
+SNULL, 94417997381631, 94417997406207,
+STORE, 94417996333056, 94417997381631,
+STORE, 94417997381632, 94417997406207,
+ERASE, 94417997381632, 94417997406207,
+STORE, 94417996333056, 94417997516799,
+SNULL, 94417997488127, 94417997516799,
+STORE, 94417996333056, 94417997488127,
+STORE, 94417997488128, 94417997516799,
+ERASE, 94417997488128, 94417997516799,
+STORE, 94417996333056, 94417997643775,
+SNULL, 94417997631487, 94417997643775,
+STORE, 94417996333056, 94417997631487,
+STORE, 94417997631488, 94417997643775,
+ERASE, 94417997631488, 94417997643775,
+SNULL, 94417997590527, 94417997631487,
+STORE, 94417996333056, 94417997590527,
+STORE, 94417997590528, 94417997631487,
+ERASE, 94417997590528, 94417997631487,
+STORE, 94417996333056, 94417997733887,
+STORE, 94417996333056, 94417997869055,
+STORE, 94417996333056, 94417998004223,
+SNULL, 94417998000127, 94417998004223,
+STORE, 94417996333056, 94417998000127,
+STORE, 94417998000128, 94417998004223,
+ERASE, 94417998000128, 94417998004223,
+STORE, 94049170993152, 94049171206143,
+STORE, 94049173303296, 94049173307391,
+STORE, 94049173307392, 94049173315583,
+STORE, 94049173315584, 94049173327871,
+STORE, 94049176236032, 94049183645695,
+STORE, 139807795544064, 139807797202943,
+STORE, 139807797202944, 139807799300095,
+STORE, 139807799300096, 139807799316479,
+STORE, 139807799316480, 139807799324671,
+STORE, 139807799324672, 139807799341055,
+STORE, 139807799341056, 139807799353343,
+STORE, 139807799353344, 139807801446399,
+STORE, 139807801446400, 139807801450495,
+STORE, 139807801450496, 139807801454591,
+STORE, 139807801454592, 139807801597951,
+STORE, 139807801966592, 139807803650047,
+STORE, 139807803650048, 139807803666431,
+STORE, 139807803695104, 139807803699199,
+STORE, 139807803699200, 139807803703295,
+STORE, 139807803703296, 139807803707391,
+STORE, 140727555538944, 140727555678207,
+STORE, 140727555940352, 140727555952639,
+STORE, 140727555952640, 140727555956735,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722483441664, 140737488351231,
+SNULL, 140722483449855, 140737488351231,
+STORE, 140722483441664, 140722483449855,
+STORE, 140722483310592, 140722483449855,
+STORE, 94416704921600, 94416707145727,
+SNULL, 94416705032191, 94416707145727,
+STORE, 94416704921600, 94416705032191,
+STORE, 94416705032192, 94416707145727,
+ERASE, 94416705032192, 94416707145727,
+STORE, 94416707125248, 94416707137535,
+STORE, 94416707137536, 94416707145727,
+STORE, 140555439296512, 140555441549311,
+SNULL, 140555439439871, 140555441549311,
+STORE, 140555439296512, 140555439439871,
+STORE, 140555439439872, 140555441549311,
+ERASE, 140555439439872, 140555441549311,
+STORE, 140555441537024, 140555441545215,
+STORE, 140555441545216, 140555441549311,
+STORE, 140722484781056, 140722484785151,
+STORE, 140722484768768, 140722484781055,
+STORE, 140555441508352, 140555441537023,
+STORE, 140555441500160, 140555441508351,
+STORE, 140555435499520, 140555439296511,
+SNULL, 140555435499520, 140555437158399,
+STORE, 140555437158400, 140555439296511,
+STORE, 140555435499520, 140555437158399,
+SNULL, 140555439255551, 140555439296511,
+STORE, 140555437158400, 140555439255551,
+STORE, 140555439255552, 140555439296511,
+SNULL, 140555439255552, 140555439280127,
+STORE, 140555439280128, 140555439296511,
+STORE, 140555439255552, 140555439280127,
+ERASE, 140555439255552, 140555439280127,
+STORE, 140555439255552, 140555439280127,
+ERASE, 140555439280128, 140555439296511,
+STORE, 140555439280128, 140555439296511,
+SNULL, 140555439271935, 140555439280127,
+STORE, 140555439255552, 140555439271935,
+STORE, 140555439271936, 140555439280127,
+SNULL, 94416707133439, 94416707137535,
+STORE, 94416707125248, 94416707133439,
+STORE, 94416707133440, 94416707137535,
+SNULL, 140555441541119, 140555441545215,
+STORE, 140555441537024, 140555441541119,
+STORE, 140555441541120, 140555441545215,
+ERASE, 140555441508352, 140555441537023,
+STORE, 94416724672512, 94416724807679,
+STORE, 94686636953600, 94686637166591,
+STORE, 94686639263744, 94686639267839,
+STORE, 94686639267840, 94686639276031,
+STORE, 94686639276032, 94686639288319,
+STORE, 94686662193152, 94686663163903,
+STORE, 140312944431104, 140312946089983,
+STORE, 140312946089984, 140312948187135,
+STORE, 140312948187136, 140312948203519,
+STORE, 140312948203520, 140312948211711,
+STORE, 140312948211712, 140312948228095,
+STORE, 140312948228096, 140312948240383,
+STORE, 140312948240384, 140312950333439,
+STORE, 140312950333440, 140312950337535,
+STORE, 140312950337536, 140312950341631,
+STORE, 140312950341632, 140312950484991,
+STORE, 140312950853632, 140312952537087,
+STORE, 140312952537088, 140312952553471,
+STORE, 140312952582144, 140312952586239,
+STORE, 140312952586240, 140312952590335,
+STORE, 140312952590336, 140312952594431,
+STORE, 140730598920192, 140730599059455,
+STORE, 140730599108608, 140730599120895,
+STORE, 140730599120896, 140730599124991,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140726234079232, 140737488351231,
+SNULL, 140726234087423, 140737488351231,
+STORE, 140726234079232, 140726234087423,
+STORE, 140726233948160, 140726234087423,
+STORE, 94589467578368, 94589469802495,
+SNULL, 94589467688959, 94589469802495,
+STORE, 94589467578368, 94589467688959,
+STORE, 94589467688960, 94589469802495,
+ERASE, 94589467688960, 94589469802495,
+STORE, 94589469782016, 94589469794303,
+STORE, 94589469794304, 94589469802495,
+STORE, 140587082842112, 140587085094911,
+SNULL, 140587082985471, 140587085094911,
+STORE, 140587082842112, 140587082985471,
+STORE, 140587082985472, 140587085094911,
+ERASE, 140587082985472, 140587085094911,
+STORE, 140587085082624, 140587085090815,
+STORE, 140587085090816, 140587085094911,
+STORE, 140726234103808, 140726234107903,
+STORE, 140726234091520, 140726234103807,
+STORE, 140587085053952, 140587085082623,
+STORE, 140587085045760, 140587085053951,
+STORE, 140587079045120, 140587082842111,
+SNULL, 140587079045120, 140587080703999,
+STORE, 140587080704000, 140587082842111,
+STORE, 140587079045120, 140587080703999,
+SNULL, 140587082801151, 140587082842111,
+STORE, 140587080704000, 140587082801151,
+STORE, 140587082801152, 140587082842111,
+SNULL, 140587082801152, 140587082825727,
+STORE, 140587082825728, 140587082842111,
+STORE, 140587082801152, 140587082825727,
+ERASE, 140587082801152, 140587082825727,
+STORE, 140587082801152, 140587082825727,
+ERASE, 140587082825728, 140587082842111,
+STORE, 140587082825728, 140587082842111,
+SNULL, 140587082817535, 140587082825727,
+STORE, 140587082801152, 140587082817535,
+STORE, 140587082817536, 140587082825727,
+SNULL, 94589469790207, 94589469794303,
+STORE, 94589469782016, 94589469790207,
+STORE, 94589469790208, 94589469794303,
+SNULL, 140587085086719, 140587085090815,
+STORE, 140587085082624, 140587085086719,
+STORE, 140587085086720, 140587085090815,
+ERASE, 140587085053952, 140587085082623,
+STORE, 94589477507072, 94589477642239,
+STORE, 94225448325120, 94225448538111,
+STORE, 94225450635264, 94225450639359,
+STORE, 94225450639360, 94225450647551,
+STORE, 94225450647552, 94225450659839,
+STORE, 94225470246912, 94225473548287,
+STORE, 140199245496320, 140199247155199,
+STORE, 140199247155200, 140199249252351,
+STORE, 140199249252352, 140199249268735,
+STORE, 140199249268736, 140199249276927,
+STORE, 140199249276928, 140199249293311,
+STORE, 140199249293312, 140199249305599,
+STORE, 140199249305600, 140199251398655,
+STORE, 140199251398656, 140199251402751,
+STORE, 140199251402752, 140199251406847,
+STORE, 140199251406848, 140199251550207,
+STORE, 140199251918848, 140199253602303,
+STORE, 140199253602304, 140199253618687,
+STORE, 140199253647360, 140199253651455,
+STORE, 140199253651456, 140199253655551,
+STORE, 140199253655552, 140199253659647,
+STORE, 140726264414208, 140726264553471,
+STORE, 140726265843712, 140726265855999,
+STORE, 140726265856000, 140726265860095,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733508358144, 140737488351231,
+SNULL, 140733508366335, 140737488351231,
+STORE, 140733508358144, 140733508366335,
+STORE, 140733508227072, 140733508366335,
+STORE, 94766263947264, 94766266171391,
+SNULL, 94766264057855, 94766266171391,
+STORE, 94766263947264, 94766264057855,
+STORE, 94766264057856, 94766266171391,
+ERASE, 94766264057856, 94766266171391,
+STORE, 94766266150912, 94766266163199,
+STORE, 94766266163200, 94766266171391,
+STORE, 140693985132544, 140693987385343,
+SNULL, 140693985275903, 140693987385343,
+STORE, 140693985132544, 140693985275903,
+STORE, 140693985275904, 140693987385343,
+ERASE, 140693985275904, 140693987385343,
+STORE, 140693987373056, 140693987381247,
+STORE, 140693987381248, 140693987385343,
+STORE, 140733509939200, 140733509943295,
+STORE, 140733509926912, 140733509939199,
+STORE, 140693987344384, 140693987373055,
+STORE, 140693987336192, 140693987344383,
+STORE, 140693981335552, 140693985132543,
+SNULL, 140693981335552, 140693982994431,
+STORE, 140693982994432, 140693985132543,
+STORE, 140693981335552, 140693982994431,
+SNULL, 140693985091583, 140693985132543,
+STORE, 140693982994432, 140693985091583,
+STORE, 140693985091584, 140693985132543,
+SNULL, 140693985091584, 140693985116159,
+STORE, 140693985116160, 140693985132543,
+STORE, 140693985091584, 140693985116159,
+ERASE, 140693985091584, 140693985116159,
+STORE, 140693985091584, 140693985116159,
+ERASE, 140693985116160, 140693985132543,
+STORE, 140693985116160, 140693985132543,
+SNULL, 140693985107967, 140693985116159,
+STORE, 140693985091584, 140693985107967,
+STORE, 140693985107968, 140693985116159,
+SNULL, 94766266159103, 94766266163199,
+STORE, 94766266150912, 94766266159103,
+STORE, 94766266159104, 94766266163199,
+SNULL, 140693987377151, 140693987381247,
+STORE, 140693987373056, 140693987377151,
+STORE, 140693987377152, 140693987381247,
+ERASE, 140693987344384, 140693987373055,
+STORE, 94766282035200, 94766282170367,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724769353728, 140737488351231,
+SNULL, 140724769361919, 140737488351231,
+STORE, 140724769353728, 140724769361919,
+STORE, 140724769222656, 140724769361919,
+STORE, 94710460526592, 94710462750719,
+SNULL, 94710460637183, 94710462750719,
+STORE, 94710460526592, 94710460637183,
+STORE, 94710460637184, 94710462750719,
+ERASE, 94710460637184, 94710462750719,
+STORE, 94710462730240, 94710462742527,
+STORE, 94710462742528, 94710462750719,
+STORE, 140469764395008, 140469766647807,
+SNULL, 140469764538367, 140469766647807,
+STORE, 140469764395008, 140469764538367,
+STORE, 140469764538368, 140469766647807,
+ERASE, 140469764538368, 140469766647807,
+STORE, 140469766635520, 140469766643711,
+STORE, 140469766643712, 140469766647807,
+STORE, 140724770877440, 140724770881535,
+STORE, 140724770865152, 140724770877439,
+STORE, 140469766606848, 140469766635519,
+STORE, 140469766598656, 140469766606847,
+STORE, 140469760598016, 140469764395007,
+SNULL, 140469760598016, 140469762256895,
+STORE, 140469762256896, 140469764395007,
+STORE, 140469760598016, 140469762256895,
+SNULL, 140469764354047, 140469764395007,
+STORE, 140469762256896, 140469764354047,
+STORE, 140469764354048, 140469764395007,
+SNULL, 140469764354048, 140469764378623,
+STORE, 140469764378624, 140469764395007,
+STORE, 140469764354048, 140469764378623,
+ERASE, 140469764354048, 140469764378623,
+STORE, 140469764354048, 140469764378623,
+ERASE, 140469764378624, 140469764395007,
+STORE, 140469764378624, 140469764395007,
+SNULL, 140469764370431, 140469764378623,
+STORE, 140469764354048, 140469764370431,
+STORE, 140469764370432, 140469764378623,
+SNULL, 94710462738431, 94710462742527,
+STORE, 94710462730240, 94710462738431,
+STORE, 94710462738432, 94710462742527,
+SNULL, 140469766639615, 140469766643711,
+STORE, 140469766635520, 140469766639615,
+STORE, 140469766639616, 140469766643711,
+ERASE, 140469766606848, 140469766635519,
+STORE, 94710485581824, 94710485716991,
+STORE, 94105755795456, 94105756008447,
+STORE, 94105758105600, 94105758109695,
+STORE, 94105758109696, 94105758117887,
+STORE, 94105758117888, 94105758130175,
+STORE, 94105788981248, 94105794871295,
+STORE, 140641190031360, 140641191690239,
+STORE, 140641191690240, 140641193787391,
+STORE, 140641193787392, 140641193803775,
+STORE, 140641193803776, 140641193811967,
+STORE, 140641193811968, 140641193828351,
+STORE, 140641193828352, 140641193840639,
+STORE, 140641193840640, 140641195933695,
+STORE, 140641195933696, 140641195937791,
+STORE, 140641195937792, 140641195941887,
+STORE, 140641195941888, 140641196085247,
+STORE, 140641196453888, 140641198137343,
+STORE, 140641198137344, 140641198153727,
+STORE, 140641198182400, 140641198186495,
+STORE, 140641198186496, 140641198190591,
+STORE, 140641198190592, 140641198194687,
+STORE, 140731980034048, 140731980173311,
+STORE, 140731981078528, 140731981090815,
+STORE, 140731981090816, 140731981094911,
+STORE, 93828086431744, 93828086644735,
+STORE, 93828088741888, 93828088745983,
+STORE, 93828088745984, 93828088754175,
+STORE, 93828088754176, 93828088766463,
+STORE, 93828094193664, 93828096831487,
+STORE, 139844717334528, 139844718993407,
+STORE, 139844718993408, 139844721090559,
+STORE, 139844721090560, 139844721106943,
+STORE, 139844721106944, 139844721115135,
+STORE, 139844721115136, 139844721131519,
+STORE, 139844721131520, 139844721143807,
+STORE, 139844721143808, 139844723236863,
+STORE, 139844723236864, 139844723240959,
+STORE, 139844723240960, 139844723245055,
+STORE, 139844723245056, 139844723388415,
+STORE, 139844723757056, 139844725440511,
+STORE, 139844725440512, 139844725456895,
+STORE, 139844725485568, 139844725489663,
+STORE, 139844725489664, 139844725493759,
+STORE, 139844725493760, 139844725497855,
+STORE, 140729996185600, 140729996324863,
+STORE, 140729996828672, 140729996840959,
+STORE, 140729996840960, 140729996845055,
+STORE, 140737488347136, 140737488351231,
+STORE, 140722494771200, 140737488351231,
+SNULL, 140722494775295, 140737488351231,
+STORE, 140722494771200, 140722494775295,
+STORE, 140722494640128, 140722494775295,
+STORE, 94324011311104, 94324013535231,
+SNULL, 94324011421695, 94324013535231,
+STORE, 94324011311104, 94324011421695,
+STORE, 94324011421696, 94324013535231,
+ERASE, 94324011421696, 94324013535231,
+STORE, 94324013514752, 94324013527039,
+STORE, 94324013527040, 94324013535231,
+STORE, 140151462309888, 140151464562687,
+SNULL, 140151462453247, 140151464562687,
+STORE, 140151462309888, 140151462453247,
+STORE, 140151462453248, 140151464562687,
+ERASE, 140151462453248, 140151464562687,
+STORE, 140151464550400, 140151464558591,
+STORE, 140151464558592, 140151464562687,
+STORE, 140722495467520, 140722495471615,
+STORE, 140722495455232, 140722495467519,
+STORE, 140151464521728, 140151464550399,
+STORE, 140151464513536, 140151464521727,
+STORE, 140151458512896, 140151462309887,
+SNULL, 140151458512896, 140151460171775,
+STORE, 140151460171776, 140151462309887,
+STORE, 140151458512896, 140151460171775,
+SNULL, 140151462268927, 140151462309887,
+STORE, 140151460171776, 140151462268927,
+STORE, 140151462268928, 140151462309887,
+SNULL, 140151462268928, 140151462293503,
+STORE, 140151462293504, 140151462309887,
+STORE, 140151462268928, 140151462293503,
+ERASE, 140151462268928, 140151462293503,
+STORE, 140151462268928, 140151462293503,
+ERASE, 140151462293504, 140151462309887,
+STORE, 140151462293504, 140151462309887,
+SNULL, 140151462285311, 140151462293503,
+STORE, 140151462268928, 140151462285311,
+STORE, 140151462285312, 140151462293503,
+SNULL, 94324013522943, 94324013527039,
+STORE, 94324013514752, 94324013522943,
+STORE, 94324013522944, 94324013527039,
+SNULL, 140151464554495, 140151464558591,
+STORE, 140151464550400, 140151464554495,
+STORE, 140151464554496, 140151464558591,
+ERASE, 140151464521728, 140151464550399,
+STORE, 94324024778752, 94324024913919,
+STORE, 94899262967808, 94899263180799,
+STORE, 94899265277952, 94899265282047,
+STORE, 94899265282048, 94899265290239,
+STORE, 94899265290240, 94899265302527,
+STORE, 94899295469568, 94899298689023,
+STORE, 140434388418560, 140434390077439,
+STORE, 140434390077440, 140434392174591,
+STORE, 140434392174592, 140434392190975,
+STORE, 140434392190976, 140434392199167,
+STORE, 140434392199168, 140434392215551,
+STORE, 140434392215552, 140434392227839,
+STORE, 140434392227840, 140434394320895,
+STORE, 140434394320896, 140434394324991,
+STORE, 140434394324992, 140434394329087,
+STORE, 140434394329088, 140434394472447,
+STORE, 140434394841088, 140434396524543,
+STORE, 140434396524544, 140434396540927,
+STORE, 140434396569600, 140434396573695,
+STORE, 140434396573696, 140434396577791,
+STORE, 140434396577792, 140434396581887,
+STORE, 140720618135552, 140720618274815,
+STORE, 140720618418176, 140720618430463,
+STORE, 140720618430464, 140720618434559,
+STORE, 94425529798656, 94425530011647,
+STORE, 94425532108800, 94425532112895,
+STORE, 94425532112896, 94425532121087,
+STORE, 94425532121088, 94425532133375,
+STORE, 94425557753856, 94425566576639,
+STORE, 140600528470016, 140600530128895,
+STORE, 140600530128896, 140600532226047,
+STORE, 140600532226048, 140600532242431,
+STORE, 140600532242432, 140600532250623,
+STORE, 140600532250624, 140600532267007,
+STORE, 140600532267008, 140600532279295,
+STORE, 140600532279296, 140600534372351,
+STORE, 140600534372352, 140600534376447,
+STORE, 140600534376448, 140600534380543,
+STORE, 140600534380544, 140600534523903,
+STORE, 140600534892544, 140600536575999,
+STORE, 140600536576000, 140600536592383,
+STORE, 140600536621056, 140600536625151,
+STORE, 140600536625152, 140600536629247,
+STORE, 140600536629248, 140600536633343,
+STORE, 140721857785856, 140721857925119,
+STORE, 140721858068480, 140721858080767,
+STORE, 140721858080768, 140721858084863,
+STORE, 94425529798656, 94425530011647,
+STORE, 94425532108800, 94425532112895,
+STORE, 94425532112896, 94425532121087,
+STORE, 94425532121088, 94425532133375,
+STORE, 94425557753856, 94425568772095,
+STORE, 140600528470016, 140600530128895,
+STORE, 140600530128896, 140600532226047,
+STORE, 140600532226048, 140600532242431,
+STORE, 140600532242432, 140600532250623,
+STORE, 140600532250624, 140600532267007,
+STORE, 140600532267008, 140600532279295,
+STORE, 140600532279296, 140600534372351,
+STORE, 140600534372352, 140600534376447,
+STORE, 140600534376448, 140600534380543,
+STORE, 140600534380544, 140600534523903,
+STORE, 140600534892544, 140600536575999,
+STORE, 140600536576000, 140600536592383,
+STORE, 140600536621056, 140600536625151,
+STORE, 140600536625152, 140600536629247,
+STORE, 140600536629248, 140600536633343,
+STORE, 140721857785856, 140721857925119,
+STORE, 140721858068480, 140721858080767,
+STORE, 140721858080768, 140721858084863,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735611645952, 140737488351231,
+SNULL, 140735611654143, 140737488351231,
+STORE, 140735611645952, 140735611654143,
+STORE, 140735611514880, 140735611654143,
+STORE, 94592137641984, 94592139866111,
+SNULL, 94592137752575, 94592139866111,
+STORE, 94592137641984, 94592137752575,
+STORE, 94592137752576, 94592139866111,
+ERASE, 94592137752576, 94592139866111,
+STORE, 94592139845632, 94592139857919,
+STORE, 94592139857920, 94592139866111,
+STORE, 140350425030656, 140350427283455,
+SNULL, 140350425174015, 140350427283455,
+STORE, 140350425030656, 140350425174015,
+STORE, 140350425174016, 140350427283455,
+ERASE, 140350425174016, 140350427283455,
+STORE, 140350427271168, 140350427279359,
+STORE, 140350427279360, 140350427283455,
+STORE, 140735612043264, 140735612047359,
+STORE, 140735612030976, 140735612043263,
+STORE, 140350427242496, 140350427271167,
+STORE, 140350427234304, 140350427242495,
+STORE, 140350421233664, 140350425030655,
+SNULL, 140350421233664, 140350422892543,
+STORE, 140350422892544, 140350425030655,
+STORE, 140350421233664, 140350422892543,
+SNULL, 140350424989695, 140350425030655,
+STORE, 140350422892544, 140350424989695,
+STORE, 140350424989696, 140350425030655,
+SNULL, 140350424989696, 140350425014271,
+STORE, 140350425014272, 140350425030655,
+STORE, 140350424989696, 140350425014271,
+ERASE, 140350424989696, 140350425014271,
+STORE, 140350424989696, 140350425014271,
+ERASE, 140350425014272, 140350425030655,
+STORE, 140350425014272, 140350425030655,
+SNULL, 140350425006079, 140350425014271,
+STORE, 140350424989696, 140350425006079,
+STORE, 140350425006080, 140350425014271,
+SNULL, 94592139853823, 94592139857919,
+STORE, 94592139845632, 94592139853823,
+STORE, 94592139853824, 94592139857919,
+SNULL, 140350427275263, 140350427279359,
+STORE, 140350427271168, 140350427275263,
+STORE, 140350427275264, 140350427279359,
+ERASE, 140350427242496, 140350427271167,
+STORE, 94592164823040, 94592164958207,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723500535808, 140737488351231,
+SNULL, 140723500543999, 140737488351231,
+STORE, 140723500535808, 140723500543999,
+STORE, 140723500404736, 140723500543999,
+STORE, 94458379010048, 94458381234175,
+SNULL, 94458379120639, 94458381234175,
+STORE, 94458379010048, 94458379120639,
+STORE, 94458379120640, 94458381234175,
+ERASE, 94458379120640, 94458381234175,
+STORE, 94458381213696, 94458381225983,
+STORE, 94458381225984, 94458381234175,
+STORE, 139771674230784, 139771676483583,
+SNULL, 139771674374143, 139771676483583,
+STORE, 139771674230784, 139771674374143,
+STORE, 139771674374144, 139771676483583,
+ERASE, 139771674374144, 139771676483583,
+STORE, 139771676471296, 139771676479487,
+STORE, 139771676479488, 139771676483583,
+STORE, 140723500769280, 140723500773375,
+STORE, 140723500756992, 140723500769279,
+STORE, 139771676442624, 139771676471295,
+STORE, 139771676434432, 139771676442623,
+STORE, 139771670433792, 139771674230783,
+SNULL, 139771670433792, 139771672092671,
+STORE, 139771672092672, 139771674230783,
+STORE, 139771670433792, 139771672092671,
+SNULL, 139771674189823, 139771674230783,
+STORE, 139771672092672, 139771674189823,
+STORE, 139771674189824, 139771674230783,
+SNULL, 139771674189824, 139771674214399,
+STORE, 139771674214400, 139771674230783,
+STORE, 139771674189824, 139771674214399,
+ERASE, 139771674189824, 139771674214399,
+STORE, 139771674189824, 139771674214399,
+ERASE, 139771674214400, 139771674230783,
+STORE, 139771674214400, 139771674230783,
+SNULL, 139771674206207, 139771674214399,
+STORE, 139771674189824, 139771674206207,
+STORE, 139771674206208, 139771674214399,
+SNULL, 94458381221887, 94458381225983,
+STORE, 94458381213696, 94458381221887,
+STORE, 94458381221888, 94458381225983,
+SNULL, 139771676475391, 139771676479487,
+STORE, 139771676471296, 139771676475391,
+STORE, 139771676475392, 139771676479487,
+ERASE, 139771676442624, 139771676471295,
+STORE, 94458401873920, 94458402009087,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731316264960, 140737488351231,
+SNULL, 140731316273151, 140737488351231,
+STORE, 140731316264960, 140731316273151,
+STORE, 140731316133888, 140731316273151,
+STORE, 94437830881280, 94437833215999,
+SNULL, 94437831094271, 94437833215999,
+STORE, 94437830881280, 94437831094271,
+STORE, 94437831094272, 94437833215999,
+ERASE, 94437831094272, 94437833215999,
+STORE, 94437833191424, 94437833203711,
+STORE, 94437833203712, 94437833215999,
+STORE, 140265986031616, 140265988284415,
+SNULL, 140265986174975, 140265988284415,
+STORE, 140265986031616, 140265986174975,
+STORE, 140265986174976, 140265988284415,
+ERASE, 140265986174976, 140265988284415,
+STORE, 140265988272128, 140265988280319,
+STORE, 140265988280320, 140265988284415,
+STORE, 140731316318208, 140731316322303,
+STORE, 140731316305920, 140731316318207,
+STORE, 140265988243456, 140265988272127,
+STORE, 140265988235264, 140265988243455,
+STORE, 140265983918080, 140265986031615,
+SNULL, 140265983918080, 140265983930367,
+STORE, 140265983930368, 140265986031615,
+STORE, 140265983918080, 140265983930367,
+SNULL, 140265986023423, 140265986031615,
+STORE, 140265983930368, 140265986023423,
+STORE, 140265986023424, 140265986031615,
+ERASE, 140265986023424, 140265986031615,
+STORE, 140265986023424, 140265986031615,
+STORE, 140265980121088, 140265983918079,
+SNULL, 140265980121088, 140265981779967,
+STORE, 140265981779968, 140265983918079,
+STORE, 140265980121088, 140265981779967,
+SNULL, 140265983877119, 140265983918079,
+STORE, 140265981779968, 140265983877119,
+STORE, 140265983877120, 140265983918079,
+SNULL, 140265983877120, 140265983901695,
+STORE, 140265983901696, 140265983918079,
+STORE, 140265983877120, 140265983901695,
+ERASE, 140265983877120, 140265983901695,
+STORE, 140265983877120, 140265983901695,
+ERASE, 140265983901696, 140265983918079,
+STORE, 140265983901696, 140265983918079,
+STORE, 140265988227072, 140265988243455,
+SNULL, 140265983893503, 140265983901695,
+STORE, 140265983877120, 140265983893503,
+STORE, 140265983893504, 140265983901695,
+SNULL, 140265986027519, 140265986031615,
+STORE, 140265986023424, 140265986027519,
+STORE, 140265986027520, 140265986031615,
+SNULL, 94437833195519, 94437833203711,
+STORE, 94437833191424, 94437833195519,
+STORE, 94437833195520, 94437833203711,
+SNULL, 140265988276223, 140265988280319,
+STORE, 140265988272128, 140265988276223,
+STORE, 140265988276224, 140265988280319,
+ERASE, 140265988243456, 140265988272127,
+STORE, 94437847638016, 94437847773183,
+STORE, 140265986543616, 140265988227071,
+STORE, 94437847638016, 94437847908351,
+STORE, 94437847638016, 94437848043519,
+STORE, 94437847638016, 94437848190975,
+SNULL, 94437848178687, 94437848190975,
+STORE, 94437847638016, 94437848178687,
+STORE, 94437848178688, 94437848190975,
+ERASE, 94437848178688, 94437848190975,
+STORE, 94437847638016, 94437848330239,
+STORE, 94437847638016, 94437848465407,
+SNULL, 94437848444927, 94437848465407,
+STORE, 94437847638016, 94437848444927,
+STORE, 94437848444928, 94437848465407,
+ERASE, 94437848444928, 94437848465407,
+STORE, 94437847638016, 94437848584191,
+STORE, 94437847638016, 94437848719359,
+SNULL, 94437848678399, 94437848719359,
+STORE, 94437847638016, 94437848678399,
+STORE, 94437848678400, 94437848719359,
+ERASE, 94437848678400, 94437848719359,
+STORE, 94437847638016, 94437848842239,
+SNULL, 94437848825855, 94437848842239,
+STORE, 94437847638016, 94437848825855,
+STORE, 94437848825856, 94437848842239,
+ERASE, 94437848825856, 94437848842239,
+STORE, 94437847638016, 94437848961023,
+STORE, 94437847638016, 94437849096191,
+STORE, 94661814710272, 94661814923263,
+STORE, 94661817020416, 94661817024511,
+STORE, 94661817024512, 94661817032703,
+STORE, 94661817032704, 94661817044991,
+STORE, 94661840424960, 94661841240063,
+STORE, 140582259814400, 140582261473279,
+STORE, 140582261473280, 140582263570431,
+STORE, 140582263570432, 140582263586815,
+STORE, 140582263586816, 140582263595007,
+STORE, 140582263595008, 140582263611391,
+STORE, 140582263611392, 140582263623679,
+STORE, 140582263623680, 140582265716735,
+STORE, 140582265716736, 140582265720831,
+STORE, 140582265720832, 140582265724927,
+STORE, 140582265724928, 140582265868287,
+STORE, 140582266236928, 140582267920383,
+STORE, 140582267920384, 140582267936767,
+STORE, 140582267965440, 140582267969535,
+STORE, 140582267969536, 140582267973631,
+STORE, 140582267973632, 140582267977727,
+STORE, 140735472508928, 140735472648191,
+STORE, 140735472672768, 140735472685055,
+STORE, 140735472685056, 140735472689151,
+STORE, 94440069140480, 94440069353471,
+STORE, 94440071450624, 94440071454719,
+STORE, 94440071454720, 94440071462911,
+STORE, 94440071462912, 94440071475199,
+STORE, 94440072122368, 94440079048703,
+STORE, 140112218095616, 140112219754495,
+STORE, 140112219754496, 140112221851647,
+STORE, 140112221851648, 140112221868031,
+STORE, 140112221868032, 140112221876223,
+STORE, 140112221876224, 140112221892607,
+STORE, 140112221892608, 140112221904895,
+STORE, 140112221904896, 140112223997951,
+STORE, 140112223997952, 140112224002047,
+STORE, 140112224002048, 140112224006143,
+STORE, 140112224006144, 140112224149503,
+STORE, 140112224518144, 140112226201599,
+STORE, 140112226201600, 140112226217983,
+STORE, 140112226246656, 140112226250751,
+STORE, 140112226250752, 140112226254847,
+STORE, 140112226254848, 140112226258943,
+STORE, 140737460969472, 140737461108735,
+STORE, 140737462083584, 140737462095871,
+STORE, 140737462095872, 140737462099967,
+STORE, 94257654345728, 94257654390783,
+STORE, 94257656483840, 94257656487935,
+STORE, 94257656487936, 94257656492031,
+STORE, 94257656492032, 94257656496127,
+STORE, 94257665859584, 94257665994751,
+STORE, 140507070345216, 140507070386175,
+STORE, 140507070386176, 140507072483327,
+STORE, 140507072483328, 140507072487423,
+STORE, 140507072487424, 140507072491519,
+STORE, 140507072491520, 140507072516095,
+STORE, 140507072516096, 140507072561151,
+STORE, 140507072561152, 140507074654207,
+STORE, 140507074654208, 140507074658303,
+STORE, 140507074658304, 140507074662399,
+STORE, 140507074662400, 140507074744319,
+STORE, 140507074744320, 140507076841471,
+STORE, 140507076841472, 140507076845567,
+STORE, 140507076845568, 140507076849663,
+STORE, 140507076849664, 140507076857855,
+STORE, 140507076857856, 140507076886527,
+STORE, 140507076886528, 140507078979583,
+STORE, 140507078979584, 140507078983679,
+STORE, 140507078983680, 140507078987775,
+STORE, 140507078987776, 140507079086079,
+STORE, 140507079086080, 140507081179135,
+STORE, 140507081179136, 140507081183231,
+STORE, 140507081183232, 140507081187327,
+STORE, 140507081187328, 140507081203711,
+STORE, 140507081203712, 140507081220095,
+STORE, 140507081220096, 140507083317247,
+STORE, 140507083317248, 140507083321343,
+STORE, 140507083321344, 140507083325439,
+STORE, 140507083325440, 140507083792383,
+STORE, 140507083792384, 140507085885439,
+STORE, 140507085885440, 140507085889535,
+STORE, 140507085889536, 140507085893631,
+STORE, 140507085893632, 140507085905919,
+STORE, 140507085905920, 140507087998975,
+STORE, 140507087998976, 140507088003071,
+STORE, 140507088003072, 140507088007167,
+STORE, 140507088007168, 140507088125951,
+STORE, 140507088125952, 140507090219007,
+STORE, 140507090219008, 140507090223103,
+STORE, 140507090223104, 140507090227199,
+STORE, 140507090227200, 140507090268159,
+STORE, 140507090268160, 140507091927039,
+STORE, 140507091927040, 140507094024191,
+STORE, 140507094024192, 140507094040575,
+STORE, 140507094040576, 140507094048767,
+STORE, 140507094048768, 140507094065151,
+STORE, 140507094065152, 140507094216703,
+STORE, 140507094216704, 140507096309759,
+STORE, 140507096309760, 140507096313855,
+STORE, 140507096313856, 140507096317951,
+STORE, 140507096317952, 140507096326143,
+STORE, 140507096326144, 140507096379391,
+STORE, 140507096379392, 140507098472447,
+STORE, 140507098472448, 140507098476543,
+STORE, 140507098476544, 140507098480639,
+STORE, 140507098480640, 140507098623999,
+STORE, 140507098980352, 140507100663807,
+STORE, 140507100663808, 140507100692479,
+STORE, 140507100721152, 140507100725247,
+STORE, 140507100725248, 140507100729343,
+STORE, 140507100729344, 140507100733439,
+STORE, 140728152780800, 140728152915967,
+STORE, 140728153698304, 140728153710591,
+STORE, 140728153710592, 140728153714687,
+STORE, 140507068137472, 140507070345215,
+SNULL, 140507068137472, 140507068190719,
+STORE, 140507068190720, 140507070345215,
+STORE, 140507068137472, 140507068190719,
+SNULL, 140507070287871, 140507070345215,
+STORE, 140507068190720, 140507070287871,
+STORE, 140507070287872, 140507070345215,
+SNULL, 140507070287872, 140507070296063,
+STORE, 140507070296064, 140507070345215,
+STORE, 140507070287872, 140507070296063,
+ERASE, 140507070287872, 140507070296063,
+STORE, 140507070287872, 140507070296063,
+ERASE, 140507070296064, 140507070345215,
+STORE, 140507070296064, 140507070345215,
+STORE, 140507100692480, 140507100721151,
+STORE, 140507065810944, 140507068137471,
+SNULL, 140507065810944, 140507065843711,
+STORE, 140507065843712, 140507068137471,
+STORE, 140507065810944, 140507065843711,
+SNULL, 140507067940863, 140507068137471,
+STORE, 140507065843712, 140507067940863,
+STORE, 140507067940864, 140507068137471,
+SNULL, 140507067940864, 140507067949055,
+STORE, 140507067949056, 140507068137471,
+STORE, 140507067940864, 140507067949055,
+ERASE, 140507067940864, 140507067949055,
+STORE, 140507067940864, 140507067949055,
+ERASE, 140507067949056, 140507068137471,
+STORE, 140507067949056, 140507068137471,
+SNULL, 140507067944959, 140507067949055,
+STORE, 140507067940864, 140507067944959,
+STORE, 140507067944960, 140507067949055,
+SNULL, 140507070291967, 140507070296063,
+STORE, 140507070287872, 140507070291967,
+STORE, 140507070291968, 140507070296063,
+ERASE, 140507100692480, 140507100721151,
+STORE, 140507063705600, 140507065810943,
+SNULL, 140507063705600, 140507063709695,
+STORE, 140507063709696, 140507065810943,
+STORE, 140507063705600, 140507063709695,
+SNULL, 140507065802751, 140507065810943,
+STORE, 140507063709696, 140507065802751,
+STORE, 140507065802752, 140507065810943,
+ERASE, 140507065802752, 140507065810943,
+STORE, 140507065802752, 140507065810943,
+SNULL, 140507065806847, 140507065810943,
+STORE, 140507065802752, 140507065806847,
+STORE, 140507065806848, 140507065810943,
+STORE, 140507061600256, 140507063705599,
+SNULL, 140507061600256, 140507061604351,
+STORE, 140507061604352, 140507063705599,
+STORE, 140507061600256, 140507061604351,
+SNULL, 140507063697407, 140507063705599,
+STORE, 140507061604352, 140507063697407,
+STORE, 140507063697408, 140507063705599,
+ERASE, 140507063697408, 140507063705599,
+STORE, 140507063697408, 140507063705599,
+SNULL, 140507063701503, 140507063705599,
+STORE, 140507063697408, 140507063701503,
+STORE, 140507063701504, 140507063705599,
+STORE, 140507059490816, 140507061600255,
+SNULL, 140507059490816, 140507059499007,
+STORE, 140507059499008, 140507061600255,
+STORE, 140507059490816, 140507059499007,
+SNULL, 140507061592063, 140507061600255,
+STORE, 140507059499008, 140507061592063,
+STORE, 140507061592064, 140507061600255,
+ERASE, 140507061592064, 140507061600255,
+STORE, 140507061592064, 140507061600255,
+SNULL, 140507061596159, 140507061600255,
+STORE, 140507061592064, 140507061596159,
+STORE, 140507061596160, 140507061600255,
+STORE, 140507057377280, 140507059490815,
+SNULL, 140507057377280, 140507057389567,
+STORE, 140507057389568, 140507059490815,
+STORE, 140507057377280, 140507057389567,
+SNULL, 140507059482623, 140507059490815,
+STORE, 140507057389568, 140507059482623,
+STORE, 140507059482624, 140507059490815,
+ERASE, 140507059482624, 140507059490815,
+STORE, 140507059482624, 140507059490815,
+SNULL, 140507059486719, 140507059490815,
+STORE, 140507059482624, 140507059486719,
+STORE, 140507059486720, 140507059490815,
+STORE, 140507055255552, 140507057377279,
+SNULL, 140507055255552, 140507055276031,
+STORE, 140507055276032, 140507057377279,
+STORE, 140507055255552, 140507055276031,
+SNULL, 140507057369087, 140507057377279,
+STORE, 140507055276032, 140507057369087,
+STORE, 140507057369088, 140507057377279,
+ERASE, 140507057369088, 140507057377279,
+STORE, 140507057369088, 140507057377279,
+SNULL, 140507057373183, 140507057377279,
+STORE, 140507057369088, 140507057373183,
+STORE, 140507057373184, 140507057377279,
+STORE, 140507098693632, 140507098980351,
+SNULL, 140507098959871, 140507098980351,
+STORE, 140507098693632, 140507098959871,
+STORE, 140507098959872, 140507098980351,
+SNULL, 140507098959872, 140507098976255,
+STORE, 140507098976256, 140507098980351,
+STORE, 140507098959872, 140507098976255,
+ERASE, 140507098959872, 140507098976255,
+STORE, 140507098959872, 140507098976255,
+ERASE, 140507098976256, 140507098980351,
+STORE, 140507098976256, 140507098980351,
+STORE, 140507100692480, 140507100721151,
+STORE, 140507053125632, 140507055255551,
+SNULL, 140507053125632, 140507053154303,
+STORE, 140507053154304, 140507055255551,
+STORE, 140507053125632, 140507053154303,
+SNULL, 140507055247359, 140507055255551,
+STORE, 140507053154304, 140507055247359,
+STORE, 140507055247360, 140507055255551,
+ERASE, 140507055247360, 140507055255551,
+STORE, 140507055247360, 140507055255551,
+STORE, 140507051012096, 140507053125631,
+SNULL, 140507051012096, 140507051024383,
+STORE, 140507051024384, 140507053125631,
+STORE, 140507051012096, 140507051024383,
+SNULL, 140507053117439, 140507053125631,
+STORE, 140507051024384, 140507053117439,
+STORE, 140507053117440, 140507053125631,
+ERASE, 140507053117440, 140507053125631,
+STORE, 140507053117440, 140507053125631,
+SNULL, 140507053121535, 140507053125631,
+STORE, 140507053117440, 140507053121535,
+STORE, 140507053121536, 140507053125631,
+SNULL, 140507055251455, 140507055255551,
+STORE, 140507055247360, 140507055251455,
+STORE, 140507055251456, 140507055255551,
+SNULL, 140507098972159, 140507098976255,
+STORE, 140507098959872, 140507098972159,
+STORE, 140507098972160, 140507098976255,
+ERASE, 140507100692480, 140507100721151,
+STORE, 140507100717056, 140507100721151,
+ERASE, 140507100717056, 140507100721151,
+STORE, 140507100717056, 140507100721151,
+ERASE, 140507100717056, 140507100721151,
+STORE, 140507100717056, 140507100721151,
+ERASE, 140507100717056, 140507100721151,
+STORE, 140507100717056, 140507100721151,
+ERASE, 140507100717056, 140507100721151,
+STORE, 140507100692480, 140507100721151,
+ERASE, 140507068137472, 140507068190719,
+ERASE, 140507068190720, 140507070287871,
+ERASE, 140507070287872, 140507070291967,
+ERASE, 140507070291968, 140507070296063,
+ERASE, 140507070296064, 140507070345215,
+ERASE, 140507065810944, 140507065843711,
+ERASE, 140507065843712, 140507067940863,
+ERASE, 140507067940864, 140507067944959,
+ERASE, 140507067944960, 140507067949055,
+ERASE, 140507067949056, 140507068137471,
+ERASE, 140507063705600, 140507063709695,
+ERASE, 140507063709696, 140507065802751,
+ERASE, 140507065802752, 140507065806847,
+ERASE, 140507065806848, 140507065810943,
+ERASE, 140507061600256, 140507061604351,
+ERASE, 140507061604352, 140507063697407,
+ERASE, 140507063697408, 140507063701503,
+ERASE, 140507063701504, 140507063705599,
+ERASE, 140507059490816, 140507059499007,
+ERASE, 140507059499008, 140507061592063,
+ERASE, 140507061592064, 140507061596159,
+ERASE, 140507061596160, 140507061600255,
+ERASE, 140507057377280, 140507057389567,
+ERASE, 140507057389568, 140507059482623,
+ERASE, 140507059482624, 140507059486719,
+ERASE, 140507059486720, 140507059490815,
+ERASE, 140507055255552, 140507055276031,
+ERASE, 140507055276032, 140507057369087,
+ERASE, 140507057369088, 140507057373183,
+ERASE, 140507057373184, 140507057377279,
+ERASE, 140507098693632, 140507098959871,
+ERASE, 140507098959872, 140507098972159,
+ERASE, 140507098972160, 140507098976255,
+ERASE, 140507098976256, 140507098980351,
+ERASE, 140507051012096, 140507051024383,
+ERASE, 140507051024384, 140507053117439,
+ERASE, 140507053117440, 140507053121535,
+ERASE, 140507053121536, 140507053125631,
+STORE, 94036448296960, 94036448509951,
+STORE, 94036450607104, 94036450611199,
+STORE, 94036450611200, 94036450619391,
+STORE, 94036450619392, 94036450631679,
+STORE, 94036482445312, 94036502376447,
+STORE, 140469487013888, 140469488672767,
+STORE, 140469488672768, 140469490769919,
+STORE, 140469490769920, 140469490786303,
+STORE, 140469490786304, 140469490794495,
+STORE, 140469490794496, 140469490810879,
+STORE, 140469490810880, 140469490823167,
+STORE, 140469490823168, 140469492916223,
+STORE, 140469492916224, 140469492920319,
+STORE, 140469492920320, 140469492924415,
+STORE, 140469492924416, 140469493067775,
+STORE, 140469493436416, 140469495119871,
+STORE, 140469495119872, 140469495136255,
+STORE, 140469495164928, 140469495169023,
+STORE, 140469495169024, 140469495173119,
+STORE, 140469495173120, 140469495177215,
+STORE, 140732281446400, 140732281585663,
+STORE, 140732282736640, 140732282748927,
+STORE, 140732282748928, 140732282753023,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723411931136, 140737488351231,
+SNULL, 140723411939327, 140737488351231,
+STORE, 140723411931136, 140723411939327,
+STORE, 140723411800064, 140723411939327,
+STORE, 93993768685568, 93993770909695,
+SNULL, 93993768796159, 93993770909695,
+STORE, 93993768685568, 93993768796159,
+STORE, 93993768796160, 93993770909695,
+ERASE, 93993768796160, 93993770909695,
+STORE, 93993770889216, 93993770901503,
+STORE, 93993770901504, 93993770909695,
+STORE, 140508681740288, 140508683993087,
+SNULL, 140508681883647, 140508683993087,
+STORE, 140508681740288, 140508681883647,
+STORE, 140508681883648, 140508683993087,
+ERASE, 140508681883648, 140508683993087,
+STORE, 140508683980800, 140508683988991,
+STORE, 140508683988992, 140508683993087,
+STORE, 140723412070400, 140723412074495,
+STORE, 140723412058112, 140723412070399,
+STORE, 140508683952128, 140508683980799,
+STORE, 140508683943936, 140508683952127,
+STORE, 140508677943296, 140508681740287,
+SNULL, 140508677943296, 140508679602175,
+STORE, 140508679602176, 140508681740287,
+STORE, 140508677943296, 140508679602175,
+SNULL, 140508681699327, 140508681740287,
+STORE, 140508679602176, 140508681699327,
+STORE, 140508681699328, 140508681740287,
+SNULL, 140508681699328, 140508681723903,
+STORE, 140508681723904, 140508681740287,
+STORE, 140508681699328, 140508681723903,
+ERASE, 140508681699328, 140508681723903,
+STORE, 140508681699328, 140508681723903,
+ERASE, 140508681723904, 140508681740287,
+STORE, 140508681723904, 140508681740287,
+SNULL, 140508681715711, 140508681723903,
+STORE, 140508681699328, 140508681715711,
+STORE, 140508681715712, 140508681723903,
+SNULL, 93993770897407, 93993770901503,
+STORE, 93993770889216, 93993770897407,
+STORE, 93993770897408, 93993770901503,
+SNULL, 140508683984895, 140508683988991,
+STORE, 140508683980800, 140508683984895,
+STORE, 140508683984896, 140508683988991,
+ERASE, 140508683952128, 140508683980799,
+STORE, 93993791582208, 93993791717375,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140734685458432, 140737488351231,
+SNULL, 140734685466623, 140737488351231,
+STORE, 140734685458432, 140734685466623,
+STORE, 140734685327360, 140734685466623,
+STORE, 93832321548288, 93832323772415,
+SNULL, 93832321658879, 93832323772415,
+STORE, 93832321548288, 93832321658879,
+STORE, 93832321658880, 93832323772415,
+ERASE, 93832321658880, 93832323772415,
+STORE, 93832323751936, 93832323764223,
+STORE, 93832323764224, 93832323772415,
+STORE, 140650945118208, 140650947371007,
+SNULL, 140650945261567, 140650947371007,
+STORE, 140650945118208, 140650945261567,
+STORE, 140650945261568, 140650947371007,
+ERASE, 140650945261568, 140650947371007,
+STORE, 140650947358720, 140650947366911,
+STORE, 140650947366912, 140650947371007,
+STORE, 140734686081024, 140734686085119,
+STORE, 140734686068736, 140734686081023,
+STORE, 140650947330048, 140650947358719,
+STORE, 140650947321856, 140650947330047,
+STORE, 140650941321216, 140650945118207,
+SNULL, 140650941321216, 140650942980095,
+STORE, 140650942980096, 140650945118207,
+STORE, 140650941321216, 140650942980095,
+SNULL, 140650945077247, 140650945118207,
+STORE, 140650942980096, 140650945077247,
+STORE, 140650945077248, 140650945118207,
+SNULL, 140650945077248, 140650945101823,
+STORE, 140650945101824, 140650945118207,
+STORE, 140650945077248, 140650945101823,
+ERASE, 140650945077248, 140650945101823,
+STORE, 140650945077248, 140650945101823,
+ERASE, 140650945101824, 140650945118207,
+STORE, 140650945101824, 140650945118207,
+SNULL, 140650945093631, 140650945101823,
+STORE, 140650945077248, 140650945093631,
+STORE, 140650945093632, 140650945101823,
+SNULL, 93832323760127, 93832323764223,
+STORE, 93832323751936, 93832323760127,
+STORE, 93832323760128, 93832323764223,
+SNULL, 140650947362815, 140650947366911,
+STORE, 140650947358720, 140650947362815,
+STORE, 140650947362816, 140650947366911,
+ERASE, 140650947330048, 140650947358719,
+STORE, 93832331890688, 93832332025855,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728333520896, 140737488351231,
+SNULL, 140728333529087, 140737488351231,
+STORE, 140728333520896, 140728333529087,
+STORE, 140728333389824, 140728333529087,
+STORE, 94872734732288, 94872736956415,
+SNULL, 94872734842879, 94872736956415,
+STORE, 94872734732288, 94872734842879,
+STORE, 94872734842880, 94872736956415,
+ERASE, 94872734842880, 94872736956415,
+STORE, 94872736935936, 94872736948223,
+STORE, 94872736948224, 94872736956415,
+STORE, 139755193257984, 139755195510783,
+SNULL, 139755193401343, 139755195510783,
+STORE, 139755193257984, 139755193401343,
+STORE, 139755193401344, 139755195510783,
+ERASE, 139755193401344, 139755195510783,
+STORE, 139755195498496, 139755195506687,
+STORE, 139755195506688, 139755195510783,
+STORE, 140728333926400, 140728333930495,
+STORE, 140728333914112, 140728333926399,
+STORE, 139755195469824, 139755195498495,
+STORE, 139755195461632, 139755195469823,
+STORE, 139755189460992, 139755193257983,
+SNULL, 139755189460992, 139755191119871,
+STORE, 139755191119872, 139755193257983,
+STORE, 139755189460992, 139755191119871,
+SNULL, 139755193217023, 139755193257983,
+STORE, 139755191119872, 139755193217023,
+STORE, 139755193217024, 139755193257983,
+SNULL, 139755193217024, 139755193241599,
+STORE, 139755193241600, 139755193257983,
+STORE, 139755193217024, 139755193241599,
+ERASE, 139755193217024, 139755193241599,
+STORE, 139755193217024, 139755193241599,
+ERASE, 139755193241600, 139755193257983,
+STORE, 139755193241600, 139755193257983,
+SNULL, 139755193233407, 139755193241599,
+STORE, 139755193217024, 139755193233407,
+STORE, 139755193233408, 139755193241599,
+SNULL, 94872736944127, 94872736948223,
+STORE, 94872736935936, 94872736944127,
+STORE, 94872736944128, 94872736948223,
+SNULL, 139755195502591, 139755195506687,
+STORE, 139755195498496, 139755195502591,
+STORE, 139755195502592, 139755195506687,
+ERASE, 139755195469824, 139755195498495,
+STORE, 94872749744128, 94872749879295,
+STORE, 94720243642368, 94720243855359,
+STORE, 94720245952512, 94720245956607,
+STORE, 94720245956608, 94720245964799,
+STORE, 94720245964800, 94720245977087,
+STORE, 94720277745664, 94720278151167,
+STORE, 140453174497280, 140453176156159,
+STORE, 140453176156160, 140453178253311,
+STORE, 140453178253312, 140453178269695,
+STORE, 140453178269696, 140453178277887,
+STORE, 140453178277888, 140453178294271,
+STORE, 140453178294272, 140453178306559,
+STORE, 140453178306560, 140453180399615,
+STORE, 140453180399616, 140453180403711,
+STORE, 140453180403712, 140453180407807,
+STORE, 140453180407808, 140453180551167,
+STORE, 140453180919808, 140453182603263,
+STORE, 140453182603264, 140453182619647,
+STORE, 140453182648320, 140453182652415,
+STORE, 140453182652416, 140453182656511,
+STORE, 140453182656512, 140453182660607,
+STORE, 140733223923712, 140733224062975,
+STORE, 140733224808448, 140733224820735,
+STORE, 140733224820736, 140733224824831,
+STORE, 94321091141632, 94321091354623,
+STORE, 94321093451776, 94321093455871,
+STORE, 94321093455872, 94321093464063,
+STORE, 94321093464064, 94321093476351,
+STORE, 94321115873280, 94321117229055,
+STORE, 139695978840064, 139695980498943,
+STORE, 139695980498944, 139695982596095,
+STORE, 139695982596096, 139695982612479,
+STORE, 139695982612480, 139695982620671,
+STORE, 139695982620672, 139695982637055,
+STORE, 139695982637056, 139695982649343,
+STORE, 139695982649344, 139695984742399,
+STORE, 139695984742400, 139695984746495,
+STORE, 139695984746496, 139695984750591,
+STORE, 139695984750592, 139695984893951,
+STORE, 139695985262592, 139695986946047,
+STORE, 139695986946048, 139695986962431,
+STORE, 139695986991104, 139695986995199,
+STORE, 139695986995200, 139695986999295,
+STORE, 139695986999296, 139695987003391,
+STORE, 140734650564608, 140734650703871,
+STORE, 140734650785792, 140734650798079,
+STORE, 140734650798080, 140734650802175,
+STORE, 94523438456832, 94523438669823,
+STORE, 94523440766976, 94523440771071,
+STORE, 94523440771072, 94523440779263,
+STORE, 94523440779264, 94523440791551,
+STORE, 94523464544256, 94523465842687,
+STORE, 140453231493120, 140453233151999,
+STORE, 140453233152000, 140453235249151,
+STORE, 140453235249152, 140453235265535,
+STORE, 140453235265536, 140453235273727,
+STORE, 140453235273728, 140453235290111,
+STORE, 140453235290112, 140453235302399,
+STORE, 140453235302400, 140453237395455,
+STORE, 140453237395456, 140453237399551,
+STORE, 140453237399552, 140453237403647,
+STORE, 140453237403648, 140453237547007,
+STORE, 140453237915648, 140453239599103,
+STORE, 140453239599104, 140453239615487,
+STORE, 140453239644160, 140453239648255,
+STORE, 140453239648256, 140453239652351,
+STORE, 140453239652352, 140453239656447,
+STORE, 140734679445504, 140734679584767,
+STORE, 140734680018944, 140734680031231,
+STORE, 140734680031232, 140734680035327,
+STORE, 94614776987648, 94614777200639,
+STORE, 94614779297792, 94614779301887,
+STORE, 94614779301888, 94614779310079,
+STORE, 94614779310080, 94614779322367,
+STORE, 94614798467072, 94614800699391,
+STORE, 139677037182976, 139677038841855,
+STORE, 139677038841856, 139677040939007,
+STORE, 139677040939008, 139677040955391,
+STORE, 139677040955392, 139677040963583,
+STORE, 139677040963584, 139677040979967,
+STORE, 139677040979968, 139677040992255,
+STORE, 139677040992256, 139677043085311,
+STORE, 139677043085312, 139677043089407,
+STORE, 139677043089408, 139677043093503,
+STORE, 139677043093504, 139677043236863,
+STORE, 139677043605504, 139677045288959,
+STORE, 139677045288960, 139677045305343,
+STORE, 139677045334016, 139677045338111,
+STORE, 139677045338112, 139677045342207,
+STORE, 139677045342208, 139677045346303,
+STORE, 140721604411392, 140721604550655,
+STORE, 140721606135808, 140721606148095,
+STORE, 140721606148096, 140721606152191,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140729280544768, 140737488351231,
+SNULL, 140729280552959, 140737488351231,
+STORE, 140729280544768, 140729280552959,
+STORE, 140729280413696, 140729280552959,
+STORE, 94863939334144, 94863941558271,
+SNULL, 94863939444735, 94863941558271,
+STORE, 94863939334144, 94863939444735,
+STORE, 94863939444736, 94863941558271,
+ERASE, 94863939444736, 94863941558271,
+STORE, 94863941537792, 94863941550079,
+STORE, 94863941550080, 94863941558271,
+STORE, 139691047276544, 139691049529343,
+SNULL, 139691047419903, 139691049529343,
+STORE, 139691047276544, 139691047419903,
+STORE, 139691047419904, 139691049529343,
+ERASE, 139691047419904, 139691049529343,
+STORE, 139691049517056, 139691049525247,
+STORE, 139691049525248, 139691049529343,
+STORE, 140729281679360, 140729281683455,
+STORE, 140729281667072, 140729281679359,
+STORE, 139691049488384, 139691049517055,
+STORE, 139691049480192, 139691049488383,
+STORE, 139691043479552, 139691047276543,
+SNULL, 139691043479552, 139691045138431,
+STORE, 139691045138432, 139691047276543,
+STORE, 139691043479552, 139691045138431,
+SNULL, 139691047235583, 139691047276543,
+STORE, 139691045138432, 139691047235583,
+STORE, 139691047235584, 139691047276543,
+SNULL, 139691047235584, 139691047260159,
+STORE, 139691047260160, 139691047276543,
+STORE, 139691047235584, 139691047260159,
+ERASE, 139691047235584, 139691047260159,
+STORE, 139691047235584, 139691047260159,
+ERASE, 139691047260160, 139691047276543,
+STORE, 139691047260160, 139691047276543,
+SNULL, 139691047251967, 139691047260159,
+STORE, 139691047235584, 139691047251967,
+STORE, 139691047251968, 139691047260159,
+SNULL, 94863941545983, 94863941550079,
+STORE, 94863941537792, 94863941545983,
+STORE, 94863941545984, 94863941550079,
+SNULL, 139691049521151, 139691049525247,
+STORE, 139691049517056, 139691049521151,
+STORE, 139691049521152, 139691049525247,
+ERASE, 139691049488384, 139691049517055,
+STORE, 94863951294464, 94863951429631,
+STORE, 93998209294336, 93998209507327,
+STORE, 93998211604480, 93998211608575,
+STORE, 93998211608576, 93998211616767,
+STORE, 93998211616768, 93998211629055,
+STORE, 93998227210240, 93998227615743,
+STORE, 140243029913600, 140243031572479,
+STORE, 140243031572480, 140243033669631,
+STORE, 140243033669632, 140243033686015,
+STORE, 140243033686016, 140243033694207,
+STORE, 140243033694208, 140243033710591,
+STORE, 140243033710592, 140243033722879,
+STORE, 140243033722880, 140243035815935,
+STORE, 140243035815936, 140243035820031,
+STORE, 140243035820032, 140243035824127,
+STORE, 140243035824128, 140243035967487,
+STORE, 140243036336128, 140243038019583,
+STORE, 140243038019584, 140243038035967,
+STORE, 140243038064640, 140243038068735,
+STORE, 140243038068736, 140243038072831,
+STORE, 140243038072832, 140243038076927,
+STORE, 140734976479232, 140734976618495,
+STORE, 140734977978368, 140734977990655,
+STORE, 140734977990656, 140734977994751,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722742775808, 140737488351231,
+SNULL, 140722742783999, 140737488351231,
+STORE, 140722742775808, 140722742783999,
+STORE, 140722742644736, 140722742783999,
+STORE, 93857673662464, 93857675997183,
+SNULL, 93857673875455, 93857675997183,
+STORE, 93857673662464, 93857673875455,
+STORE, 93857673875456, 93857675997183,
+ERASE, 93857673875456, 93857675997183,
+STORE, 93857675972608, 93857675984895,
+STORE, 93857675984896, 93857675997183,
+STORE, 140629677498368, 140629679751167,
+SNULL, 140629677641727, 140629679751167,
+STORE, 140629677498368, 140629677641727,
+STORE, 140629677641728, 140629679751167,
+ERASE, 140629677641728, 140629679751167,
+STORE, 140629679738880, 140629679747071,
+STORE, 140629679747072, 140629679751167,
+STORE, 140722743222272, 140722743226367,
+STORE, 140722743209984, 140722743222271,
+STORE, 140629679710208, 140629679738879,
+STORE, 140629679702016, 140629679710207,
+STORE, 140629675384832, 140629677498367,
+SNULL, 140629675384832, 140629675397119,
+STORE, 140629675397120, 140629677498367,
+STORE, 140629675384832, 140629675397119,
+SNULL, 140629677490175, 140629677498367,
+STORE, 140629675397120, 140629677490175,
+STORE, 140629677490176, 140629677498367,
+ERASE, 140629677490176, 140629677498367,
+STORE, 140629677490176, 140629677498367,
+STORE, 140629671587840, 140629675384831,
+SNULL, 140629671587840, 140629673246719,
+STORE, 140629673246720, 140629675384831,
+STORE, 140629671587840, 140629673246719,
+SNULL, 140629675343871, 140629675384831,
+STORE, 140629673246720, 140629675343871,
+STORE, 140629675343872, 140629675384831,
+SNULL, 140629675343872, 140629675368447,
+STORE, 140629675368448, 140629675384831,
+STORE, 140629675343872, 140629675368447,
+ERASE, 140629675343872, 140629675368447,
+STORE, 140629675343872, 140629675368447,
+ERASE, 140629675368448, 140629675384831,
+STORE, 140629675368448, 140629675384831,
+STORE, 140629679693824, 140629679710207,
+SNULL, 140629675360255, 140629675368447,
+STORE, 140629675343872, 140629675360255,
+STORE, 140629675360256, 140629675368447,
+SNULL, 140629677494271, 140629677498367,
+STORE, 140629677490176, 140629677494271,
+STORE, 140629677494272, 140629677498367,
+SNULL, 93857675976703, 93857675984895,
+STORE, 93857675972608, 93857675976703,
+STORE, 93857675976704, 93857675984895,
+SNULL, 140629679742975, 140629679747071,
+STORE, 140629679738880, 140629679742975,
+STORE, 140629679742976, 140629679747071,
+ERASE, 140629679710208, 140629679738879,
+STORE, 93857705832448, 93857705967615,
+STORE, 140629678010368, 140629679693823,
+STORE, 93857705832448, 93857706102783,
+STORE, 93857705832448, 93857706237951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735922421760, 140737488351231,
+SNULL, 140735922429951, 140737488351231,
+STORE, 140735922421760, 140735922429951,
+STORE, 140735922290688, 140735922429951,
+STORE, 94651136139264, 94651138363391,
+SNULL, 94651136249855, 94651138363391,
+STORE, 94651136139264, 94651136249855,
+STORE, 94651136249856, 94651138363391,
+ERASE, 94651136249856, 94651138363391,
+STORE, 94651138342912, 94651138355199,
+STORE, 94651138355200, 94651138363391,
+STORE, 140325788266496, 140325790519295,
+SNULL, 140325788409855, 140325790519295,
+STORE, 140325788266496, 140325788409855,
+STORE, 140325788409856, 140325790519295,
+ERASE, 140325788409856, 140325790519295,
+STORE, 140325790507008, 140325790515199,
+STORE, 140325790515200, 140325790519295,
+STORE, 140735923572736, 140735923576831,
+STORE, 140735923560448, 140735923572735,
+STORE, 140325790478336, 140325790507007,
+STORE, 140325790470144, 140325790478335,
+STORE, 140325784469504, 140325788266495,
+SNULL, 140325784469504, 140325786128383,
+STORE, 140325786128384, 140325788266495,
+STORE, 140325784469504, 140325786128383,
+SNULL, 140325788225535, 140325788266495,
+STORE, 140325786128384, 140325788225535,
+STORE, 140325788225536, 140325788266495,
+SNULL, 140325788225536, 140325788250111,
+STORE, 140325788250112, 140325788266495,
+STORE, 140325788225536, 140325788250111,
+ERASE, 140325788225536, 140325788250111,
+STORE, 140325788225536, 140325788250111,
+ERASE, 140325788250112, 140325788266495,
+STORE, 140325788250112, 140325788266495,
+SNULL, 140325788241919, 140325788250111,
+STORE, 140325788225536, 140325788241919,
+STORE, 140325788241920, 140325788250111,
+SNULL, 94651138351103, 94651138355199,
+STORE, 94651138342912, 94651138351103,
+STORE, 94651138351104, 94651138355199,
+SNULL, 140325790511103, 140325790515199,
+STORE, 140325790507008, 140325790511103,
+STORE, 140325790511104, 140325790515199,
+ERASE, 140325790478336, 140325790507007,
+STORE, 94651146297344, 94651146432511,
+STORE, 94212330168320, 94212330381311,
+STORE, 94212332478464, 94212332482559,
+STORE, 94212332482560, 94212332490751,
+STORE, 94212332490752, 94212332503039,
+STORE, 94212348891136, 94212349825023,
+STORE, 140611630604288, 140611632263167,
+STORE, 140611632263168, 140611634360319,
+STORE, 140611634360320, 140611634376703,
+STORE, 140611634376704, 140611634384895,
+STORE, 140611634384896, 140611634401279,
+STORE, 140611634401280, 140611634413567,
+STORE, 140611634413568, 140611636506623,
+STORE, 140611636506624, 140611636510719,
+STORE, 140611636510720, 140611636514815,
+STORE, 140611636514816, 140611636658175,
+STORE, 140611637026816, 140611638710271,
+STORE, 140611638710272, 140611638726655,
+STORE, 140611638755328, 140611638759423,
+STORE, 140611638759424, 140611638763519,
+STORE, 140611638763520, 140611638767615,
+STORE, 140726974533632, 140726974672895,
+STORE, 140726974943232, 140726974955519,
+STORE, 140726974955520, 140726974959615,
+STORE, 94572463521792, 94572463734783,
+STORE, 94572465831936, 94572465836031,
+STORE, 94572465836032, 94572465844223,
+STORE, 94572465844224, 94572465856511,
+STORE, 94572491534336, 94572492865535,
+STORE, 140644351492096, 140644353150975,
+STORE, 140644353150976, 140644355248127,
+STORE, 140644355248128, 140644355264511,
+STORE, 140644355264512, 140644355272703,
+STORE, 140644355272704, 140644355289087,
+STORE, 140644355289088, 140644355301375,
+STORE, 140644355301376, 140644357394431,
+STORE, 140644357394432, 140644357398527,
+STORE, 140644357398528, 140644357402623,
+STORE, 140644357402624, 140644357545983,
+STORE, 140644357914624, 140644359598079,
+STORE, 140644359598080, 140644359614463,
+STORE, 140644359643136, 140644359647231,
+STORE, 140644359647232, 140644359651327,
+STORE, 140644359651328, 140644359655423,
+STORE, 140727841824768, 140727841964031,
+STORE, 140727843188736, 140727843201023,
+STORE, 140727843201024, 140727843205119,
+STORE, 94144315457536, 94144315670527,
+STORE, 94144317767680, 94144317771775,
+STORE, 94144317771776, 94144317779967,
+STORE, 94144317779968, 94144317792255,
+STORE, 94144318369792, 94144320815103,
+STORE, 140316717645824, 140316719304703,
+STORE, 140316719304704, 140316721401855,
+STORE, 140316721401856, 140316721418239,
+STORE, 140316721418240, 140316721426431,
+STORE, 140316721426432, 140316721442815,
+STORE, 140316721442816, 140316721455103,
+STORE, 140316721455104, 140316723548159,
+STORE, 140316723548160, 140316723552255,
+STORE, 140316723552256, 140316723556351,
+STORE, 140316723556352, 140316723699711,
+STORE, 140316724068352, 140316725751807,
+STORE, 140316725751808, 140316725768191,
+STORE, 140316725796864, 140316725800959,
+STORE, 140316725800960, 140316725805055,
+STORE, 140316725805056, 140316725809151,
+STORE, 140725744283648, 140725744422911,
+STORE, 140725745852416, 140725745864703,
+STORE, 140725745864704, 140725745868799,
+STORE, 94646858846208, 94646859059199,
+STORE, 94646861156352, 94646861160447,
+STORE, 94646861160448, 94646861168639,
+STORE, 94646861168640, 94646861180927,
+STORE, 94646879805440, 94646881894399,
+STORE, 140435449745408, 140435451404287,
+STORE, 140435451404288, 140435453501439,
+STORE, 140435453501440, 140435453517823,
+STORE, 140435453517824, 140435453526015,
+STORE, 140435453526016, 140435453542399,
+STORE, 140435453542400, 140435453554687,
+STORE, 140435453554688, 140435455647743,
+STORE, 140435455647744, 140435455651839,
+STORE, 140435455651840, 140435455655935,
+STORE, 140435455655936, 140435455799295,
+STORE, 140435456167936, 140435457851391,
+STORE, 140435457851392, 140435457867775,
+STORE, 140435457896448, 140435457900543,
+STORE, 140435457900544, 140435457904639,
+STORE, 140435457904640, 140435457908735,
+STORE, 140721033818112, 140721033957375,
+STORE, 140721034018816, 140721034031103,
+STORE, 140721034031104, 140721034035199,
+STORE, 94872903438336, 94872903651327,
+STORE, 94872905748480, 94872905752575,
+STORE, 94872905752576, 94872905760767,
+STORE, 94872905760768, 94872905773055,
+STORE, 94872931246080, 94872931651583,
+STORE, 139771607810048, 139771609468927,
+STORE, 139771609468928, 139771611566079,
+STORE, 139771611566080, 139771611582463,
+STORE, 139771611582464, 139771611590655,
+STORE, 139771611590656, 139771611607039,
+STORE, 139771611607040, 139771611619327,
+STORE, 139771611619328, 139771613712383,
+STORE, 139771613712384, 139771613716479,
+STORE, 139771613716480, 139771613720575,
+STORE, 139771613720576, 139771613863935,
+STORE, 139771614232576, 139771615916031,
+STORE, 139771615916032, 139771615932415,
+STORE, 139771615961088, 139771615965183,
+STORE, 139771615965184, 139771615969279,
+STORE, 139771615969280, 139771615973375,
+STORE, 140725402931200, 140725403070463,
+STORE, 140725403852800, 140725403865087,
+STORE, 140725403865088, 140725403869183,
+STORE, 94740737736704, 94740737949695,
+STORE, 94740740046848, 94740740050943,
+STORE, 94740740050944, 94740740059135,
+STORE, 94740740059136, 94740740071423,
+STORE, 94740743249920, 94740744724479,
+STORE, 140640287010816, 140640288669695,
+STORE, 140640288669696, 140640290766847,
+STORE, 140640290766848, 140640290783231,
+STORE, 140640290783232, 140640290791423,
+STORE, 140640290791424, 140640290807807,
+STORE, 140640290807808, 140640290820095,
+STORE, 140640290820096, 140640292913151,
+STORE, 140640292913152, 140640292917247,
+STORE, 140640292917248, 140640292921343,
+STORE, 140640292921344, 140640293064703,
+STORE, 140640293433344, 140640295116799,
+STORE, 140640295116800, 140640295133183,
+STORE, 140640295161856, 140640295165951,
+STORE, 140640295165952, 140640295170047,
+STORE, 140640295170048, 140640295174143,
+STORE, 140725133303808, 140725133443071,
+STORE, 140725133684736, 140725133697023,
+STORE, 140725133697024, 140725133701119,
+STORE, 140737488347136, 140737488351231,
+STORE, 140722826371072, 140737488351231,
+SNULL, 140722826375167, 140737488351231,
+STORE, 140722826371072, 140722826375167,
+STORE, 140722826240000, 140722826375167,
+STORE, 94113818611712, 94113820835839,
+SNULL, 94113818722303, 94113820835839,
+STORE, 94113818611712, 94113818722303,
+STORE, 94113818722304, 94113820835839,
+ERASE, 94113818722304, 94113820835839,
+STORE, 94113820815360, 94113820827647,
+STORE, 94113820827648, 94113820835839,
+STORE, 139628194508800, 139628196761599,
+SNULL, 139628194652159, 139628196761599,
+STORE, 139628194508800, 139628194652159,
+STORE, 139628194652160, 139628196761599,
+ERASE, 139628194652160, 139628196761599,
+STORE, 139628196749312, 139628196757503,
+STORE, 139628196757504, 139628196761599,
+STORE, 140722826727424, 140722826731519,
+STORE, 140722826715136, 140722826727423,
+STORE, 139628196720640, 139628196749311,
+STORE, 139628196712448, 139628196720639,
+STORE, 139628190711808, 139628194508799,
+SNULL, 139628190711808, 139628192370687,
+STORE, 139628192370688, 139628194508799,
+STORE, 139628190711808, 139628192370687,
+SNULL, 139628194467839, 139628194508799,
+STORE, 139628192370688, 139628194467839,
+STORE, 139628194467840, 139628194508799,
+SNULL, 139628194467840, 139628194492415,
+STORE, 139628194492416, 139628194508799,
+STORE, 139628194467840, 139628194492415,
+ERASE, 139628194467840, 139628194492415,
+STORE, 139628194467840, 139628194492415,
+ERASE, 139628194492416, 139628194508799,
+STORE, 139628194492416, 139628194508799,
+SNULL, 139628194484223, 139628194492415,
+STORE, 139628194467840, 139628194484223,
+STORE, 139628194484224, 139628194492415,
+SNULL, 94113820823551, 94113820827647,
+STORE, 94113820815360, 94113820823551,
+STORE, 94113820823552, 94113820827647,
+SNULL, 139628196753407, 139628196757503,
+STORE, 139628196749312, 139628196753407,
+STORE, 139628196753408, 139628196757503,
+ERASE, 139628196720640, 139628196749311,
+STORE, 94113830850560, 94113830985727,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731865833472, 140737488351231,
+SNULL, 140731865841663, 140737488351231,
+STORE, 140731865833472, 140731865841663,
+STORE, 140731865702400, 140731865841663,
+STORE, 94763339386880, 94763341611007,
+SNULL, 94763339497471, 94763341611007,
+STORE, 94763339386880, 94763339497471,
+STORE, 94763339497472, 94763341611007,
+ERASE, 94763339497472, 94763341611007,
+STORE, 94763341590528, 94763341602815,
+STORE, 94763341602816, 94763341611007,
+STORE, 139778398486528, 139778400739327,
+SNULL, 139778398629887, 139778400739327,
+STORE, 139778398486528, 139778398629887,
+STORE, 139778398629888, 139778400739327,
+ERASE, 139778398629888, 139778400739327,
+STORE, 139778400727040, 139778400735231,
+STORE, 139778400735232, 139778400739327,
+STORE, 140731865858048, 140731865862143,
+STORE, 140731865845760, 140731865858047,
+STORE, 139778400698368, 139778400727039,
+STORE, 139778400690176, 139778400698367,
+STORE, 139778394689536, 139778398486527,
+SNULL, 139778394689536, 139778396348415,
+STORE, 139778396348416, 139778398486527,
+STORE, 139778394689536, 139778396348415,
+SNULL, 139778398445567, 139778398486527,
+STORE, 139778396348416, 139778398445567,
+STORE, 139778398445568, 139778398486527,
+SNULL, 139778398445568, 139778398470143,
+STORE, 139778398470144, 139778398486527,
+STORE, 139778398445568, 139778398470143,
+ERASE, 139778398445568, 139778398470143,
+STORE, 139778398445568, 139778398470143,
+ERASE, 139778398470144, 139778398486527,
+STORE, 139778398470144, 139778398486527,
+SNULL, 139778398461951, 139778398470143,
+STORE, 139778398445568, 139778398461951,
+STORE, 139778398461952, 139778398470143,
+SNULL, 94763341598719, 94763341602815,
+STORE, 94763341590528, 94763341598719,
+STORE, 94763341598720, 94763341602815,
+SNULL, 139778400731135, 139778400735231,
+STORE, 139778400727040, 139778400731135,
+STORE, 139778400731136, 139778400735231,
+ERASE, 139778400698368, 139778400727039,
+STORE, 94763362197504, 94763362332671,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140737488338944, 140737488351231,
+STORE, 140732053192704, 140737488351231,
+SNULL, 140732053204991, 140737488351231,
+STORE, 140732053192704, 140732053204991,
+STORE, 140732053061632, 140732053204991,
+STORE, 4194304, 26279935,
+STORE, 28372992, 28454911,
+STORE, 28454912, 29806591,
+STORE, 140176018599936, 140176020852735,
+SNULL, 140176018743295, 140176020852735,
+STORE, 140176018599936, 140176018743295,
+STORE, 140176018743296, 140176020852735,
+ERASE, 140176018743296, 140176020852735,
+STORE, 140176020840448, 140176020848639,
+STORE, 140176020848640, 140176020852735,
+STORE, 140732053381120, 140732053385215,
+STORE, 140732053368832, 140732053381119,
+STORE, 140176020811776, 140176020840447,
+STORE, 140176020803584, 140176020811775,
+STORE, 140176014766080, 140176018599935,
+SNULL, 140176014766080, 140176016474111,
+STORE, 140176016474112, 140176018599935,
+STORE, 140176014766080, 140176016474111,
+SNULL, 140176018567167, 140176018599935,
+STORE, 140176016474112, 140176018567167,
+STORE, 140176018567168, 140176018599935,
+ERASE, 140176018567168, 140176018599935,
+STORE, 140176018567168, 140176018599935,
+STORE, 140176012570624, 140176014766079,
+SNULL, 140176012570624, 140176012664831,
+STORE, 140176012664832, 140176014766079,
+STORE, 140176012570624, 140176012664831,
+SNULL, 140176014757887, 140176014766079,
+STORE, 140176012664832, 140176014757887,
+STORE, 140176014757888, 140176014766079,
+ERASE, 140176014757888, 140176014766079,
+STORE, 140176014757888, 140176014766079,
+STORE, 140176010051584, 140176012570623,
+SNULL, 140176010051584, 140176010465279,
+STORE, 140176010465280, 140176012570623,
+STORE, 140176010051584, 140176010465279,
+SNULL, 140176012558335, 140176012570623,
+STORE, 140176010465280, 140176012558335,
+STORE, 140176012558336, 140176012570623,
+ERASE, 140176012558336, 140176012570623,
+STORE, 140176012558336, 140176012570623,
+STORE, 140176007417856, 140176010051583,
+SNULL, 140176007417856, 140176007946239,
+STORE, 140176007946240, 140176010051583,
+STORE, 140176007417856, 140176007946239,
+SNULL, 140176010043391, 140176010051583,
+STORE, 140176007946240, 140176010043391,
+STORE, 140176010043392, 140176010051583,
+ERASE, 140176010043392, 140176010051583,
+STORE, 140176010043392, 140176010051583,
+STORE, 140176005304320, 140176007417855,
+SNULL, 140176005304320, 140176005316607,
+STORE, 140176005316608, 140176007417855,
+STORE, 140176005304320, 140176005316607,
+SNULL, 140176007409663, 140176007417855,
+STORE, 140176005316608, 140176007409663,
+STORE, 140176007409664, 140176007417855,
+ERASE, 140176007409664, 140176007417855,
+STORE, 140176007409664, 140176007417855,
+STORE, 140176003100672, 140176005304319,
+SNULL, 140176003100672, 140176003203071,
+STORE, 140176003203072, 140176005304319,
+STORE, 140176003100672, 140176003203071,
+SNULL, 140176005296127, 140176005304319,
+STORE, 140176003203072, 140176005296127,
+STORE, 140176005296128, 140176005304319,
+ERASE, 140176005296128, 140176005304319,
+STORE, 140176005296128, 140176005304319,
+STORE, 140176020795392, 140176020811775,
+STORE, 140175999938560, 140176003100671,
+SNULL, 140175999938560, 140176000999423,
+STORE, 140176000999424, 140176003100671,
+STORE, 140175999938560, 140176000999423,
+SNULL, 140176003092479, 140176003100671,
+STORE, 140176000999424, 140176003092479,
+STORE, 140176003092480, 140176003100671,
+ERASE, 140176003092480, 140176003100671,
+STORE, 140176003092480, 140176003100671,
+STORE, 140175996141568, 140175999938559,
+SNULL, 140175996141568, 140175997800447,
+STORE, 140175997800448, 140175999938559,
+STORE, 140175996141568, 140175997800447,
+SNULL, 140175999897599, 140175999938559,
+STORE, 140175997800448, 140175999897599,
+STORE, 140175999897600, 140175999938559,
+SNULL, 140175999897600, 140175999922175,
+STORE, 140175999922176, 140175999938559,
+STORE, 140175999897600, 140175999922175,
+ERASE, 140175999897600, 140175999922175,
+STORE, 140175999897600, 140175999922175,
+ERASE, 140175999922176, 140175999938559,
+STORE, 140175999922176, 140175999938559,
+STORE, 140176020783104, 140176020811775,
+SNULL, 140175999913983, 140175999922175,
+STORE, 140175999897600, 140175999913983,
+STORE, 140175999913984, 140175999922175,
+SNULL, 140176003096575, 140176003100671,
+STORE, 140176003092480, 140176003096575,
+STORE, 140176003096576, 140176003100671,
+SNULL, 140176005300223, 140176005304319,
+STORE, 140176005296128, 140176005300223,
+STORE, 140176005300224, 140176005304319,
+SNULL, 140176007413759, 140176007417855,
+STORE, 140176007409664, 140176007413759,
+STORE, 140176007413760, 140176007417855,
+SNULL, 140176010047487, 140176010051583,
+STORE, 140176010043392, 140176010047487,
+STORE, 140176010047488, 140176010051583,
+SNULL, 140176012566527, 140176012570623,
+STORE, 140176012558336, 140176012566527,
+STORE, 140176012566528, 140176012570623,
+SNULL, 140176014761983, 140176014766079,
+STORE, 140176014757888, 140176014761983,
+STORE, 140176014761984, 140176014766079,
+SNULL, 140176018571263, 140176018599935,
+STORE, 140176018567168, 140176018571263,
+STORE, 140176018571264, 140176018599935,
+SNULL, 28405759, 28454911,
+STORE, 28372992, 28405759,
+STORE, 28405760, 28454911,
+SNULL, 140176020844543, 140176020848639,
+STORE, 140176020840448, 140176020844543,
+STORE, 140176020844544, 140176020848639,
+ERASE, 140176020811776, 140176020840447,
+STORE, 53080064, 53215231,
+STORE, 140176019099648, 140176020783103,
+STORE, 140176020836352, 140176020840447,
+STORE, 140176018964480, 140176019099647,
+STORE, 53080064, 53358591,
+STORE, 140175994044416, 140175996141567,
+STORE, 140176020828160, 140176020840447,
+STORE, 140176020819968, 140176020840447,
+STORE, 140176020783104, 140176020819967,
+STORE, 140176018948096, 140176019099647,
+STORE, 53080064, 53493759,
+STORE, 53080064, 53649407,
+STORE, 140176018939904, 140176019099647,
+STORE, 140176018931712, 140176019099647,
+STORE, 53080064, 53784575,
+STORE, 53080064, 53919743,
+STORE, 140176018915328, 140176019099647,
+STORE, 140176018907136, 140176019099647,
+STORE, 53080064, 54059007,
+STORE, 140175993769984, 140175996141567,
+STORE, 140176018747392, 140176019099647,
+STORE, 53080064, 54198271,
+SNULL, 54190079, 54198271,
+STORE, 53080064, 54190079,
+STORE, 54190080, 54198271,
+ERASE, 54190080, 54198271,
+SNULL, 54181887, 54190079,
+STORE, 53080064, 54181887,
+STORE, 54181888, 54190079,
+ERASE, 54181888, 54190079,
+SNULL, 54173695, 54181887,
+STORE, 53080064, 54173695,
+STORE, 54173696, 54181887,
+ERASE, 54173696, 54181887,
+SNULL, 54165503, 54173695,
+STORE, 53080064, 54165503,
+STORE, 54165504, 54173695,
+ERASE, 54165504, 54173695,
+STORE, 140175993753600, 140175996141567,
+STORE, 140175993688064, 140175996141567,
+STORE, 140175993655296, 140175996141567,
+STORE, 140175991558144, 140175996141567,
+STORE, 140175991492608, 140175996141567,
+STORE, 53080064, 54312959,
+STORE, 140175991361536, 140175996141567,
+STORE, 140175991099392, 140175996141567,
+STORE, 140175991091200, 140175996141567,
+STORE, 140175991074816, 140175996141567,
+STORE, 140175991066624, 140175996141567,
+STORE, 140175991058432, 140175996141567,
+STORE, 53080064, 54448127,
+SNULL, 54439935, 54448127,
+STORE, 53080064, 54439935,
+STORE, 54439936, 54448127,
+ERASE, 54439936, 54448127,
+SNULL, 54431743, 54439935,
+STORE, 53080064, 54431743,
+STORE, 54431744, 54439935,
+ERASE, 54431744, 54439935,
+SNULL, 54419455, 54431743,
+STORE, 53080064, 54419455,
+STORE, 54419456, 54431743,
+ERASE, 54419456, 54431743,
+SNULL, 54403071, 54419455,
+STORE, 53080064, 54403071,
+STORE, 54403072, 54419455,
+ERASE, 54403072, 54419455,
+STORE, 140175991042048, 140175996141567,
+STORE, 53080064, 54538239,
+SNULL, 54534143, 54538239,
+STORE, 53080064, 54534143,
+STORE, 54534144, 54538239,
+ERASE, 54534144, 54538239,
+SNULL, 54530047, 54534143,
+STORE, 53080064, 54530047,
+STORE, 54530048, 54534143,
+ERASE, 54530048, 54534143,
+SNULL, 54525951, 54530047,
+STORE, 53080064, 54525951,
+STORE, 54525952, 54530047,
+ERASE, 54525952, 54530047,
+SNULL, 54521855, 54525951,
+STORE, 53080064, 54521855,
+STORE, 54521856, 54525951,
+ERASE, 54521856, 54525951,
+SNULL, 54517759, 54521855,
+STORE, 53080064, 54517759,
+STORE, 54517760, 54521855,
+ERASE, 54517760, 54521855,
+SNULL, 54513663, 54517759,
+STORE, 53080064, 54513663,
+STORE, 54513664, 54517759,
+ERASE, 54513664, 54517759,
+SNULL, 54509567, 54513663,
+STORE, 53080064, 54509567,
+STORE, 54509568, 54513663,
+ERASE, 54509568, 54513663,
+STORE, 140175991025664, 140175996141567,
+STORE, 140175990992896, 140175996141567,
+STORE, 53080064, 54644735,
+SNULL, 54628351, 54644735,
+STORE, 53080064, 54628351,
+STORE, 54628352, 54644735,
+ERASE, 54628352, 54644735,
+SNULL, 54616063, 54628351,
+STORE, 53080064, 54616063,
+STORE, 54616064, 54628351,
+ERASE, 54616064, 54628351,
+STORE, 140175988895744, 140175996141567,
+STORE, 53080064, 54767615,
+STORE, 140175988879360, 140175996141567,
+STORE, 140175988617216, 140175996141567,
+STORE, 140175988609024, 140175996141567,
+STORE, 140175988600832, 140175996141567,
+STORE, 53080064, 54906879,
+SNULL, 54898687, 54906879,
+STORE, 53080064, 54898687,
+STORE, 54898688, 54906879,
+ERASE, 54898688, 54906879,
+SNULL, 54853631, 54898687,
+STORE, 53080064, 54853631,
+STORE, 54853632, 54898687,
+ERASE, 54853632, 54898687,
+STORE, 140175986503680, 140175996141567,
+STORE, 53080064, 54996991,
+STORE, 140175986495488, 140175996141567,
+STORE, 140175986487296, 140175996141567,
+STORE, 140175985438720, 140175996141567,
+STORE, 53080064, 55136255,
+STORE, 140175985405952, 140175996141567,
+STORE, 140175985139712, 140175996141567,
+SNULL, 140176018964479, 140176019099647,
+STORE, 140176018747392, 140176018964479,
+STORE, 140176018964480, 140176019099647,
+ERASE, 140176018964480, 140176019099647,
+STORE, 140175983042560, 140175996141567,
+STORE, 140175982518272, 140175996141567,
+STORE, 140175980421120, 140175996141567,
+STORE, 53080064, 55287807,
+STORE, 53080064, 55427071,
+STORE, 140176019091456, 140176019099647,
+STORE, 140176019083264, 140176019099647,
+STORE, 140176019075072, 140176019099647,
+STORE, 140176019066880, 140176019099647,
+STORE, 140176019058688, 140176019099647,
+STORE, 140175980158976, 140175996141567,
+STORE, 140176019050496, 140176019099647,
+STORE, 140176019042304, 140176019099647,
+STORE, 140176019034112, 140176019099647,
+STORE, 140176019025920, 140176019099647,
+STORE, 140176019017728, 140176019099647,
+STORE, 140176019009536, 140176019099647,
+STORE, 140176019001344, 140176019099647,
+STORE, 140176018993152, 140176019099647,
+STORE, 140176018984960, 140176019099647,
+STORE, 140176018976768, 140176019099647,
+STORE, 140176018968576, 140176019099647,
+STORE, 140175978061824, 140175996141567,
+STORE, 53080064, 55603199,
+STORE, 140175978029056, 140175996141567,
+STORE, 140175977996288, 140175996141567,
+STORE, 53080064, 55738367,
+STORE, 53080064, 55881727,
+STORE, 140175977963520, 140175996141567,
+STORE, 140175977930752, 140175996141567,
+STORE, 53080064, 56041471,
+STORE, 140175977897984, 140175996141567,
+STORE, 140175977865216, 140175996141567,
+SNULL, 55881727, 56041471,
+STORE, 53080064, 55881727,
+STORE, 55881728, 56041471,
+ERASE, 55881728, 56041471,
+SNULL, 55721983, 55881727,
+STORE, 53080064, 55721983,
+STORE, 55721984, 55881727,
+ERASE, 55721984, 55881727,
+SNULL, 55570431, 55721983,
+STORE, 53080064, 55570431,
+STORE, 55570432, 55721983,
+ERASE, 55570432, 55721983,
+STORE, 140175977857024, 140175996141567,
+STORE, 140175975759872, 140175996141567,
+STORE, 53080064, 55754751,
+STORE, 53080064, 55943167,
+STORE, 140175975751680, 140175996141567,
+STORE, 140175975743488, 140175996141567,
+STORE, 140175975735296, 140175996141567,
+STORE, 140175975727104, 140175996141567,
+STORE, 140175975718912, 140175996141567,
+STORE, 140175975710720, 140175996141567,
+STORE, 140175975702528, 140175996141567,
+STORE, 140175975694336, 140175996141567,
+STORE, 140175975686144, 140175996141567,
+STORE, 140175975677952, 140175996141567,
+STORE, 140175975669760, 140175996141567,
+STORE, 140175974621184, 140175996141567,
+STORE, 140175974612992, 140175996141567,
+STORE, 53080064, 56139775,
+STORE, 140175972515840, 140175996141567,
+STORE, 53080064, 56401919,
+STORE, 140175970418688, 140175996141567,
+STORE, 140175970410496, 140175996141567,
+STORE, 140175970402304, 140175996141567,
+STORE, 140175970394112, 140175996141567,
+STORE, 53080064, 56569855,
+STORE, 140175969865728, 140175996141567,
+SNULL, 140175985139711, 140175996141567,
+STORE, 140175969865728, 140175985139711,
+STORE, 140175985139712, 140175996141567,
+SNULL, 140175985139712, 140175985405951,
+STORE, 140175985405952, 140175996141567,
+STORE, 140175985139712, 140175985405951,
+ERASE, 140175985139712, 140175985405951,
+STORE, 140175965671424, 140175985139711,
+STORE, 140175985397760, 140175996141567,
+STORE, 140175985389568, 140175996141567,
+STORE, 140175985381376, 140175996141567,
+STORE, 140175985373184, 140175996141567,
+STORE, 140175985364992, 140175996141567,
+STORE, 140175985356800, 140175996141567,
+STORE, 140175985348608, 140175996141567,
+STORE, 140175985340416, 140175996141567,
+STORE, 140175985332224, 140175996141567,
+STORE, 140175985324032, 140175996141567,
+STORE, 140175985315840, 140175996141567,
+STORE, 140175985307648, 140175996141567,
+STORE, 140175985299456, 140175996141567,
+STORE, 140175985291264, 140175996141567,
+STORE, 140175985283072, 140175996141567,
+STORE, 140175985274880, 140175996141567,
+STORE, 140175963574272, 140175985139711,
+STORE, 140175985266688, 140175996141567,
+STORE, 140175961477120, 140175985139711,
+STORE, 53080064, 56831999,
+STORE, 140175959379968, 140175985139711,
+STORE, 140175985258496, 140175996141567,
+STORE, 140175957282816, 140175985139711,
+STORE, 140175985250304, 140175996141567,
+STORE, 140175985242112, 140175996141567,
+STORE, 140175985233920, 140175996141567,
+STORE, 140175985225728, 140175996141567,
+STORE, 140175985217536, 140175996141567,
+STORE, 140175957151744, 140175985139711,
+STORE, 140175956627456, 140175985139711,
+SNULL, 140175980158975, 140175985139711,
+STORE, 140175956627456, 140175980158975,
+STORE, 140175980158976, 140175985139711,
+SNULL, 140175980158976, 140175980421119,
+STORE, 140175980421120, 140175985139711,
+STORE, 140175980158976, 140175980421119,
+ERASE, 140175980158976, 140175980421119,
+STORE, 140175954530304, 140175980158975,
+STORE, 140175985209344, 140175996141567,
+STORE, 53080064, 57094143,
+STORE, 140175952433152, 140175980158975,
+STORE, 140175985192960, 140175996141567,
+STORE, 140175985184768, 140175996141567,
+STORE, 140175985176576, 140175996141567,
+STORE, 140175985168384, 140175996141567,
+STORE, 140175985160192, 140175996141567,
+STORE, 140175985152000, 140175996141567,
+STORE, 140175985143808, 140175996141567,
+STORE, 140175980412928, 140175985139711,
+STORE, 140175980404736, 140175985139711,
+STORE, 140175980396544, 140175985139711,
+STORE, 140175980388352, 140175985139711,
+STORE, 140175980380160, 140175985139711,
+STORE, 140175980371968, 140175985139711,
+STORE, 140175980363776, 140175985139711,
+STORE, 140175980355584, 140175985139711,
+STORE, 140175980347392, 140175985139711,
+STORE, 140175980339200, 140175985139711,
+STORE, 53080064, 57356287,
+SNULL, 140176018747392, 140176018907135,
+STORE, 140176018907136, 140176018964479,
+STORE, 140176018747392, 140176018907135,
+ERASE, 140176018747392, 140176018907135,
+STORE, 140175952146432, 140175980158975,
+STORE, 140175950049280, 140175980158975,
+SNULL, 140175952146431, 140175980158975,
+STORE, 140175950049280, 140175952146431,
+STORE, 140175952146432, 140175980158975,
+SNULL, 140175952146432, 140175952433151,
+STORE, 140175952433152, 140175980158975,
+STORE, 140175952146432, 140175952433151,
+ERASE, 140175952146432, 140175952433151,
+STORE, 140176018898944, 140176018964479,
+STORE, 53080064, 57749503,
+STORE, 140175949520896, 140175952146431,
+STORE, 140175947423744, 140175952146431,
+SNULL, 140175993769983, 140175996141567,
+STORE, 140175985143808, 140175993769983,
+STORE, 140175993769984, 140175996141567,
+SNULL, 140175993769984, 140175994044415,
+STORE, 140175994044416, 140175996141567,
+STORE, 140175993769984, 140175994044415,
+ERASE, 140175993769984, 140175994044415,
+STORE, 140176018890752, 140176018964479,
+STORE, 140176018882560, 140176018964479,
+STORE, 140176018874368, 140176018964479,
+STORE, 140176018866176, 140176018964479,
+STORE, 140176018849792, 140176018964479,
+STORE, 140176018841600, 140176018964479,
+STORE, 140176018825216, 140176018964479,
+STORE, 140176018817024, 140176018964479,
+STORE, 140176018800640, 140176018964479,
+STORE, 140176018792448, 140176018964479,
+STORE, 140176018759680, 140176018964479,
+STORE, 140176018751488, 140176018964479,
+STORE, 140175994028032, 140175996141567,
+STORE, 140176018743296, 140176018964479,
+STORE, 140175994011648, 140175996141567,
+STORE, 140175994003456, 140175996141567,
+STORE, 140175993987072, 140175996141567,
+STORE, 140175993978880, 140175996141567,
+STORE, 140175993946112, 140175996141567,
+STORE, 140175993937920, 140175996141567,
+STORE, 140175993921536, 140175996141567,
+STORE, 140175993913344, 140175996141567,
+STORE, 140175993896960, 140175996141567,
+STORE, 140175993888768, 140175996141567,
+STORE, 140175993872384, 140175996141567,
+STORE, 140175993864192, 140175996141567,
+STORE, 140175993831424, 140175996141567,
+STORE, 140175993823232, 140175996141567,
+STORE, 140175993806848, 140175996141567,
+STORE, 140175993798656, 140175996141567,
+STORE, 140175993782272, 140175996141567,
+STORE, 140175993774080, 140175996141567,
+STORE, 140175980322816, 140175985139711,
+STORE, 140175980314624, 140175985139711,
+STORE, 140175980281856, 140175985139711,
+STORE, 140175980273664, 140175985139711,
+STORE, 140175980257280, 140175985139711,
+STORE, 140175945326592, 140175952146431,
+STORE, 140175980249088, 140175985139711,
+STORE, 140175980232704, 140175985139711,
+STORE, 140175980224512, 140175985139711,
+STORE, 140175980208128, 140175985139711,
+STORE, 140175980199936, 140175985139711,
+STORE, 140175980167168, 140175985139711,
+STORE, 140175952433152, 140175985139711,
+STORE, 140175952416768, 140175985139711,
+STORE, 140175952408576, 140175985139711,
+STORE, 140175952392192, 140175985139711,
+STORE, 140175952384000, 140175985139711,
+STORE, 140175952367616, 140175985139711,
+STORE, 140175943229440, 140175952146431,
+STORE, 140175952359424, 140175985139711,
+STORE, 140175952326656, 140175985139711,
+STORE, 140175952318464, 140175985139711,
+STORE, 140175952302080, 140175985139711,
+STORE, 140175952293888, 140175985139711,
+STORE, 140175952277504, 140175985139711,
+STORE, 140175952269312, 140175985139711,
+STORE, 140175952252928, 140175985139711,
+STORE, 140175952244736, 140175985139711,
+STORE, 140175952211968, 140175985139711,
+STORE, 140175952203776, 140175985139711,
+STORE, 140175952187392, 140175985139711,
+STORE, 140175952179200, 140175985139711,
+STORE, 140175952162816, 140175985139711,
+STORE, 140175952154624, 140175985139711,
+STORE, 140175943213056, 140175952146431,
+STORE, 140175943213056, 140175985139711,
+STORE, 140175943180288, 140175985139711,
+STORE, 140175943172096, 140175985139711,
+STORE, 140175943155712, 140175985139711,
+STORE, 140175943147520, 140175985139711,
+STORE, 140175943131136, 140175985139711,
+STORE, 140175943122944, 140175985139711,
+STORE, 140175943106560, 140175985139711,
+STORE, 140175943098368, 140175985139711,
+STORE, 140175943065600, 140175985139711,
+STORE, 140175943057408, 140175985139711,
+STORE, 140175943041024, 140175985139711,
+STORE, 140175943032832, 140175985139711,
+STORE, 140175943016448, 140175985139711,
+STORE, 140175943008256, 140175985139711,
+STORE, 140175942991872, 140175985139711,
+STORE, 140175942983680, 140175985139711,
+STORE, 140175942950912, 140175985139711,
+STORE, 140175942942720, 140175985139711,
+STORE, 140175942926336, 140175985139711,
+STORE, 140175942918144, 140175985139711,
+STORE, 140175942901760, 140175985139711,
+STORE, 140175942893568, 140175985139711,
+STORE, 140175942877184, 140175985139711,
+STORE, 140175942868992, 140175985139711,
+STORE, 140175942836224, 140175985139711,
+STORE, 140175942828032, 140175985139711,
+STORE, 140175942811648, 140175985139711,
+STORE, 140175942803456, 140175985139711,
+STORE, 140175942787072, 140175985139711,
+STORE, 140175942778880, 140175985139711,
+STORE, 140175942762496, 140175985139711,
+STORE, 140175942754304, 140175985139711,
+STORE, 140175942721536, 140175985139711,
+STORE, 140175942713344, 140175985139711,
+STORE, 140175942696960, 140175985139711,
+STORE, 140175942688768, 140175985139711,
+STORE, 140175942672384, 140175985139711,
+STORE, 140175942664192, 140175985139711,
+STORE, 140175942647808, 140175985139711,
+STORE, 140175942639616, 140175985139711,
+STORE, 140175942606848, 140175985139711,
+STORE, 140175942598656, 140175985139711,
+STORE, 140175942582272, 140175985139711,
+STORE, 140175942574080, 140175985139711,
+STORE, 140175942557696, 140175985139711,
+STORE, 140175942549504, 140175985139711,
+STORE, 140175942533120, 140175985139711,
+STORE, 140175942524928, 140175985139711,
+STORE, 140175942492160, 140175985139711,
+STORE, 140175942483968, 140175985139711,
+STORE, 140175942467584, 140175985139711,
+STORE, 140175942459392, 140175985139711,
+STORE, 140175942443008, 140175985139711,
+STORE, 140175942434816, 140175985139711,
+STORE, 140175942418432, 140175985139711,
+STORE, 140175942410240, 140175985139711,
+STORE, 140175942377472, 140175985139711,
+STORE, 140175942369280, 140175985139711,
+STORE, 140175942352896, 140175985139711,
+STORE, 140175942344704, 140175985139711,
+STORE, 140175942328320, 140175985139711,
+STORE, 140175942320128, 140175985139711,
+STORE, 140175942303744, 140175985139711,
+STORE, 140175942295552, 140175985139711,
+STORE, 140175942262784, 140175985139711,
+STORE, 140175942254592, 140175985139711,
+STORE, 140175942238208, 140175985139711,
+STORE, 140175942230016, 140175985139711,
+STORE, 140175942213632, 140175985139711,
+STORE, 140175942205440, 140175985139711,
+STORE, 140175942189056, 140175985139711,
+STORE, 140175942180864, 140175985139711,
+STORE, 140175942148096, 140175985139711,
+STORE, 140175942139904, 140175985139711,
+STORE, 140175942123520, 140175985139711,
+STORE, 140175942115328, 140175985139711,
+STORE, 140175942098944, 140175985139711,
+STORE, 140175942090752, 140175985139711,
+STORE, 140175942074368, 140175985139711,
+STORE, 140175942066176, 140175985139711,
+STORE, 140175942033408, 140175985139711,
+STORE, 140175942025216, 140175985139711,
+STORE, 140175942008832, 140175985139711,
+STORE, 140175942000640, 140175985139711,
+STORE, 140175941984256, 140175985139711,
+STORE, 140175941976064, 140175985139711,
+STORE, 140175941959680, 140175985139711,
+STORE, 140175939862528, 140175985139711,
+STORE, 140175939854336, 140175985139711,
+STORE, 140175939821568, 140175985139711,
+STORE, 140175939813376, 140175985139711,
+STORE, 140175939796992, 140175985139711,
+STORE, 140175939788800, 140175985139711,
+STORE, 140175939772416, 140175985139711,
+STORE, 140175939764224, 140175985139711,
+STORE, 140175939747840, 140175985139711,
+STORE, 140175939739648, 140175985139711,
+STORE, 140175939706880, 140175985139711,
+STORE, 140175939698688, 140175985139711,
+STORE, 140175939682304, 140175985139711,
+STORE, 140175939674112, 140175985139711,
+STORE, 140175939657728, 140175985139711,
+STORE, 140175939649536, 140175985139711,
+STORE, 140175939633152, 140175985139711,
+STORE, 140175939624960, 140175985139711,
+STORE, 140175939592192, 140175985139711,
+STORE, 140175939584000, 140175985139711,
+STORE, 140175939567616, 140175985139711,
+STORE, 140175939559424, 140175985139711,
+STORE, 140175939543040, 140175985139711,
+STORE, 140175939534848, 140175985139711,
+STORE, 140175939518464, 140175985139711,
+STORE, 140175939510272, 140175985139711,
+STORE, 140175939477504, 140175985139711,
+STORE, 140175939469312, 140175985139711,
+STORE, 140175939452928, 140175985139711,
+STORE, 140175939444736, 140175985139711,
+STORE, 140175939428352, 140175985139711,
+STORE, 140175939420160, 140175985139711,
+STORE, 140175939403776, 140175985139711,
+STORE, 140175939395584, 140175985139711,
+STORE, 140175939362816, 140175985139711,
+STORE, 140175939354624, 140175985139711,
+STORE, 140175939338240, 140175985139711,
+STORE, 140175939330048, 140175985139711,
+STORE, 140175939313664, 140175985139711,
+STORE, 140175939305472, 140175985139711,
+STORE, 140175939289088, 140175985139711,
+STORE, 140175939280896, 140175985139711,
+STORE, 140175939248128, 140175985139711,
+STORE, 140175939239936, 140175985139711,
+STORE, 140175939223552, 140175985139711,
+STORE, 140175939215360, 140175985139711,
+STORE, 140175939198976, 140175985139711,
+STORE, 140175939190784, 140175985139711,
+STORE, 140175939174400, 140175985139711,
+STORE, 140175939166208, 140175985139711,
+STORE, 140175939133440, 140175985139711,
+STORE, 140175939125248, 140175985139711,
+STORE, 140175939108864, 140175985139711,
+STORE, 140175939100672, 140175985139711,
+STORE, 140175939084288, 140175985139711,
+STORE, 140175939076096, 140175985139711,
+STORE, 140175939059712, 140175985139711,
+STORE, 140175939051520, 140175985139711,
+STORE, 140175939018752, 140175985139711,
+STORE, 140175939010560, 140175985139711,
+STORE, 140175938994176, 140175985139711,
+STORE, 140175938985984, 140175985139711,
+STORE, 140175938969600, 140175985139711,
+STORE, 140175938961408, 140175985139711,
+STORE, 140175938945024, 140175985139711,
+STORE, 140175938936832, 140175985139711,
+STORE, 140175938904064, 140175985139711,
+STORE, 140175938895872, 140175985139711,
+STORE, 140175938879488, 140175985139711,
+STORE, 140175938871296, 140175985139711,
+STORE, 140175938854912, 140175985139711,
+STORE, 140175938846720, 140175985139711,
+STORE, 140175938830336, 140175985139711,
+STORE, 140175938822144, 140175985139711,
+STORE, 140175938789376, 140175985139711,
+STORE, 140175938781184, 140175985139711,
+STORE, 140175938764800, 140175985139711,
+STORE, 140175938756608, 140175985139711,
+STORE, 140175938740224, 140175985139711,
+STORE, 140175938732032, 140175985139711,
+STORE, 140175938715648, 140175985139711,
+STORE, 140175938707456, 140175985139711,
+STORE, 140175938674688, 140175985139711,
+STORE, 140175938666496, 140175985139711,
+STORE, 140175938650112, 140175985139711,
+STORE, 140175938641920, 140175985139711,
+STORE, 140175938625536, 140175985139711,
+STORE, 140175938617344, 140175985139711,
+STORE, 140175938600960, 140175985139711,
+STORE, 140175938592768, 140175985139711,
+STORE, 140175938560000, 140175985139711,
+STORE, 140175938551808, 140175985139711,
+STORE, 140175938535424, 140175985139711,
+STORE, 140175938527232, 140175985139711,
+STORE, 140175938510848, 140175985139711,
+STORE, 140175938502656, 140175985139711,
+STORE, 140175938486272, 140175985139711,
+STORE, 140175938478080, 140175985139711,
+STORE, 140175938445312, 140175985139711,
+STORE, 140175938437120, 140175985139711,
+STORE, 140175938420736, 140175985139711,
+STORE, 140175938412544, 140175985139711,
+STORE, 140175938396160, 140175985139711,
+STORE, 140175938387968, 140175985139711,
+STORE, 140175938371584, 140175985139711,
+STORE, 140175938363392, 140175985139711,
+STORE, 140175938330624, 140175985139711,
+STORE, 140175938322432, 140175985139711,
+STORE, 140175938306048, 140175985139711,
+STORE, 140175938297856, 140175985139711,
+STORE, 140175938281472, 140175985139711,
+STORE, 140175938273280, 140175985139711,
+STORE, 140175938256896, 140175985139711,
+STORE, 140175938248704, 140175985139711,
+STORE, 140175938215936, 140175985139711,
+STORE, 140175938207744, 140175985139711,
+STORE, 140175938191360, 140175985139711,
+STORE, 140175938183168, 140175985139711,
+STORE, 140175938166784, 140175985139711,
+STORE, 140175938158592, 140175985139711,
+STORE, 140175938142208, 140175985139711,
+STORE, 140175936045056, 140175985139711,
+STORE, 140175936036864, 140175985139711,
+STORE, 140175936004096, 140175985139711,
+STORE, 140175935995904, 140175985139711,
+STORE, 140175935979520, 140175985139711,
+STORE, 140175935971328, 140175985139711,
+STORE, 140175935954944, 140175985139711,
+STORE, 140175935946752, 140175985139711,
+STORE, 140175935930368, 140175985139711,
+STORE, 140175935922176, 140175985139711,
+STORE, 140175935889408, 140175985139711,
+STORE, 140175935881216, 140175985139711,
+STORE, 140175935864832, 140175985139711,
+STORE, 140175935856640, 140175985139711,
+STORE, 140175935840256, 140175985139711,
+STORE, 140175935832064, 140175985139711,
+STORE, 140175935815680, 140175985139711,
+STORE, 140175935807488, 140175985139711,
+STORE, 140175935774720, 140175985139711,
+STORE, 140175935766528, 140175985139711,
+STORE, 140175935750144, 140175985139711,
+STORE, 140175935741952, 140175985139711,
+STORE, 140175935725568, 140175985139711,
+STORE, 140175935717376, 140175985139711,
+STORE, 140175935700992, 140175985139711,
+STORE, 140175935692800, 140175985139711,
+STORE, 140175935660032, 140175985139711,
+STORE, 140175935651840, 140175985139711,
+STORE, 140175935635456, 140175985139711,
+STORE, 140175935627264, 140175985139711,
+STORE, 140175935610880, 140175985139711,
+STORE, 140175935602688, 140175985139711,
+STORE, 140175935586304, 140175985139711,
+STORE, 140175935578112, 140175985139711,
+STORE, 140175935545344, 140175985139711,
+STORE, 140175935537152, 140175985139711,
+STORE, 140175935520768, 140175985139711,
+STORE, 140175935512576, 140175985139711,
+STORE, 140175935496192, 140175985139711,
+STORE, 140175935488000, 140175985139711,
+STORE, 140175935471616, 140175985139711,
+STORE, 140175935463424, 140175985139711,
+STORE, 140175935430656, 140175985139711,
+STORE, 140175935422464, 140175985139711,
+STORE, 140175935406080, 140175985139711,
+STORE, 140175935397888, 140175985139711,
+STORE, 140175935381504, 140175985139711,
+STORE, 140175935373312, 140175985139711,
+STORE, 140175935356928, 140175985139711,
+STORE, 140175935348736, 140175985139711,
+STORE, 140175935315968, 140175985139711,
+STORE, 140175935307776, 140175985139711,
+STORE, 140175935291392, 140175985139711,
+STORE, 140175935283200, 140175985139711,
+STORE, 140175935266816, 140175985139711,
+STORE, 140175935258624, 140175985139711,
+STORE, 140175935242240, 140175985139711,
+STORE, 140175935234048, 140175985139711,
+STORE, 140175935201280, 140175985139711,
+STORE, 140175935193088, 140175985139711,
+STORE, 140175935176704, 140175985139711,
+STORE, 140175935168512, 140175985139711,
+STORE, 140175935152128, 140175985139711,
+STORE, 140175935143936, 140175985139711,
+STORE, 140175935127552, 140175985139711,
+STORE, 140175935119360, 140175985139711,
+STORE, 140175935086592, 140175985139711,
+STORE, 140175935078400, 140175985139711,
+STORE, 140175935062016, 140175985139711,
+STORE, 140175935053824, 140175985139711,
+STORE, 140175935037440, 140175985139711,
+STORE, 140175935029248, 140175985139711,
+STORE, 140175935012864, 140175985139711,
+STORE, 140175935004672, 140175985139711,
+STORE, 140175934971904, 140175985139711,
+STORE, 140175934963712, 140175985139711,
+STORE, 140175934947328, 140175985139711,
+STORE, 140175934939136, 140175985139711,
+STORE, 140175934922752, 140175985139711,
+STORE, 140175934914560, 140175985139711,
+STORE, 140175934898176, 140175985139711,
+STORE, 140175934889984, 140175985139711,
+STORE, 140175934857216, 140175985139711,
+STORE, 140175934849024, 140175985139711,
+STORE, 140175934832640, 140175985139711,
+STORE, 140175934824448, 140175985139711,
+STORE, 140175934808064, 140175985139711,
+STORE, 140175934799872, 140175985139711,
+STORE, 140175934783488, 140175985139711,
+STORE, 140175934775296, 140175985139711,
+STORE, 140175934742528, 140175985139711,
+STORE, 140175934734336, 140175985139711,
+STORE, 140175934717952, 140175985139711,
+STORE, 140175934709760, 140175985139711,
+STORE, 140175934693376, 140175985139711,
+STORE, 140175934685184, 140175985139711,
+STORE, 140175934668800, 140175985139711,
+STORE, 140175934660608, 140175985139711,
+STORE, 140175934627840, 140175985139711,
+STORE, 140175934619648, 140175985139711,
+STORE, 140175934603264, 140175985139711,
+STORE, 140175934595072, 140175985139711,
+STORE, 140175934578688, 140175985139711,
+STORE, 140175934570496, 140175985139711,
+STORE, 140175934554112, 140175985139711,
+STORE, 140175934545920, 140175985139711,
+STORE, 140175934513152, 140175985139711,
+STORE, 140175934504960, 140175985139711,
+STORE, 140175934488576, 140175985139711,
+STORE, 140175934480384, 140175985139711,
+STORE, 140175934464000, 140175985139711,
+STORE, 140175934455808, 140175985139711,
+STORE, 140175934439424, 140175985139711,
+STORE, 140175934431232, 140175985139711,
+STORE, 140175934398464, 140175985139711,
+STORE, 140175934390272, 140175985139711,
+STORE, 140175934373888, 140175985139711,
+STORE, 140175934365696, 140175985139711,
+STORE, 140175934349312, 140175985139711,
+STORE, 140175934341120, 140175985139711,
+STORE, 140175934324736, 140175985139711,
+STORE, 140175932227584, 140175985139711,
+STORE, 140175932219392, 140175985139711,
+STORE, 140175932186624, 140175985139711,
+STORE, 140175932178432, 140175985139711,
+STORE, 140175932162048, 140175985139711,
+STORE, 140175932153856, 140175985139711,
+STORE, 140175932137472, 140175985139711,
+STORE, 53080064, 57884671,
+STORE, 140175932129280, 140175985139711,
+STORE, 140175932112896, 140175985139711,
+STORE, 140175932104704, 140175985139711,
+STORE, 140175932071936, 140175985139711,
+STORE, 140175932063744, 140175985139711,
+STORE, 140175932047360, 140175985139711,
+STORE, 140175932039168, 140175985139711,
+STORE, 140175932022784, 140175985139711,
+STORE, 140175932014592, 140175985139711,
+STORE, 140175931998208, 140175985139711,
+STORE, 140175931990016, 140175985139711,
+STORE, 140175931957248, 140175985139711,
+STORE, 140175931949056, 140175985139711,
+STORE, 140175931932672, 140175985139711,
+STORE, 140175931924480, 140175985139711,
+STORE, 140175931908096, 140175985139711,
+STORE, 140175931899904, 140175985139711,
+STORE, 140175931883520, 140175985139711,
+STORE, 140175931875328, 140175985139711,
+STORE, 140175931842560, 140175985139711,
+STORE, 140175931834368, 140175985139711,
+STORE, 140175931817984, 140175985139711,
+STORE, 140175931809792, 140175985139711,
+STORE, 140175931793408, 140175985139711,
+STORE, 140175931785216, 140175985139711,
+STORE, 140175931768832, 140175985139711,
+STORE, 140175931760640, 140175985139711,
+STORE, 140175931727872, 140175985139711,
+STORE, 140175931719680, 140175985139711,
+STORE, 140175931703296, 140175985139711,
+STORE, 140175931695104, 140175985139711,
+STORE, 140175931678720, 140175985139711,
+STORE, 140175931670528, 140175985139711,
+STORE, 140175931654144, 140175985139711,
+STORE, 140175931645952, 140175985139711,
+STORE, 140175931613184, 140175985139711,
+STORE, 140175931604992, 140175985139711,
+STORE, 140175931588608, 140175985139711,
+STORE, 140175931580416, 140175985139711,
+STORE, 140175931564032, 140175985139711,
+STORE, 140175931555840, 140175985139711,
+STORE, 140175931539456, 140175985139711,
+STORE, 140175931531264, 140175985139711,
+STORE, 140175931498496, 140175985139711,
+STORE, 140175931490304, 140175985139711,
+STORE, 140175931473920, 140175985139711,
+STORE, 140175931465728, 140175985139711,
+STORE, 140175931449344, 140175985139711,
+STORE, 140175931441152, 140175985139711,
+STORE, 140175931424768, 140175985139711,
+STORE, 140175931416576, 140175985139711,
+STORE, 140175931383808, 140175985139711,
+STORE, 140175931375616, 140175985139711,
+STORE, 140175931359232, 140175985139711,
+STORE, 140175931351040, 140175985139711,
+STORE, 140175931334656, 140175985139711,
+STORE, 140175931326464, 140175985139711,
+STORE, 140175931310080, 140175985139711,
+STORE, 140175931301888, 140175985139711,
+STORE, 140175931269120, 140175985139711,
+STORE, 140175931260928, 140175985139711,
+STORE, 140175931244544, 140175985139711,
+STORE, 140175931236352, 140175985139711,
+STORE, 140175931219968, 140175985139711,
+STORE, 140175931211776, 140175985139711,
+STORE, 140175931195392, 140175985139711,
+STORE, 140175931187200, 140175985139711,
+STORE, 140175931154432, 140175985139711,
+STORE, 140175931146240, 140175985139711,
+STORE, 140175931129856, 140175985139711,
+STORE, 140175931121664, 140175985139711,
+STORE, 140175931105280, 140175985139711,
+STORE, 140175931097088, 140175985139711,
+STORE, 140175931080704, 140175985139711,
+STORE, 140175931072512, 140175985139711,
+STORE, 140175931039744, 140175985139711,
+STORE, 140175931031552, 140175985139711,
+STORE, 140175931015168, 140175985139711,
+STORE, 140175931006976, 140175985139711,
+STORE, 140175930990592, 140175985139711,
+STORE, 140175930982400, 140175985139711,
+STORE, 140175930966016, 140175985139711,
+STORE, 140175930957824, 140175985139711,
+STORE, 140175930925056, 140175985139711,
+STORE, 140175930916864, 140175985139711,
+STORE, 140175930900480, 140175985139711,
+STORE, 140175930892288, 140175985139711,
+STORE, 140175930875904, 140175985139711,
+STORE, 140175930867712, 140175985139711,
+STORE, 140175930851328, 140175985139711,
+STORE, 140175930843136, 140175985139711,
+STORE, 140175930810368, 140175985139711,
+STORE, 140175930802176, 140175985139711,
+STORE, 140175930785792, 140175985139711,
+STORE, 140175930777600, 140175985139711,
+STORE, 140175930761216, 140175985139711,
+STORE, 140175930753024, 140175985139711,
+STORE, 140175930736640, 140175985139711,
+STORE, 140175930728448, 140175985139711,
+STORE, 140175930695680, 140175985139711,
+STORE, 140175930687488, 140175985139711,
+STORE, 140175930671104, 140175985139711,
+STORE, 140175930662912, 140175985139711,
+STORE, 140175930646528, 140175985139711,
+STORE, 140175930638336, 140175985139711,
+STORE, 140175930621952, 140175985139711,
+STORE, 140175930613760, 140175985139711,
+STORE, 140175930580992, 140175985139711,
+STORE, 140175930572800, 140175985139711,
+STORE, 140175930556416, 140175985139711,
+STORE, 140175930548224, 140175985139711,
+STORE, 140175930531840, 140175985139711,
+STORE, 140175930523648, 140175985139711,
+STORE, 140175930507264, 140175985139711,
+STORE, 140175928410112, 140175985139711,
+STORE, 140175928401920, 140175985139711,
+STORE, 140175928369152, 140175985139711,
+STORE, 140175928360960, 140175985139711,
+STORE, 140175928344576, 140175985139711,
+STORE, 140175928336384, 140175985139711,
+STORE, 140175928320000, 140175985139711,
+STORE, 140175928311808, 140175985139711,
+STORE, 140175928295424, 140175985139711,
+STORE, 140175927242752, 140175985139711,
+SNULL, 140175956627455, 140175985139711,
+STORE, 140175927242752, 140175956627455,
+STORE, 140175956627456, 140175985139711,
+ };
+ unsigned long set24[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140735281639424, 140737488351231,
+SNULL, 140735281643519, 140737488351231,
+STORE, 140735281639424, 140735281643519,
+STORE, 140735281508352, 140735281643519,
+STORE, 94717834911744, 94717834928127,
+SNULL, 94717834915839, 94717834928127,
+STORE, 94717834911744, 94717834915839,
+STORE, 94717834915840, 94717834928127,
+ERASE, 94717834915840, 94717834928127,
+STORE, 94717834919936, 94717834928127,
+STORE, 140428246065152, 140428248317951,
+SNULL, 140428246208511, 140428248317951,
+STORE, 140428246065152, 140428246208511,
+STORE, 140428246208512, 140428248317951,
+ERASE, 140428246208512, 140428248317951,
+STORE, 140428248305664, 140428248313855,
+STORE, 140428248313856, 140428248317951,
+STORE, 140735281811456, 140735281815551,
+STORE, 140735281799168, 140735281811455,
+STORE, 140428248297472, 140428248305663,
+STORE, 140428243841024, 140428246065151,
+SNULL, 140428245491711, 140428246065151,
+STORE, 140428243841024, 140428245491711,
+STORE, 140428245491712, 140428246065151,
+SNULL, 140428245491712, 140428246061055,
+STORE, 140428246061056, 140428246065151,
+STORE, 140428245491712, 140428246061055,
+ERASE, 140428245491712, 140428246061055,
+STORE, 140428245491712, 140428246061055,
+ERASE, 140428246061056, 140428246065151,
+STORE, 140428246061056, 140428246065151,
+STORE, 140428248268800, 140428248297471,
+STORE, 140428241625088, 140428243841023,
+SNULL, 140428241625088, 140428241723391,
+STORE, 140428241723392, 140428243841023,
+STORE, 140428241625088, 140428241723391,
+SNULL, 140428243816447, 140428243841023,
+STORE, 140428241723392, 140428243816447,
+STORE, 140428243816448, 140428243841023,
+SNULL, 140428243816448, 140428243824639,
+STORE, 140428243824640, 140428243841023,
+STORE, 140428243816448, 140428243824639,
+ERASE, 140428243816448, 140428243824639,
+STORE, 140428243816448, 140428243824639,
+ERASE, 140428243824640, 140428243841023,
+STORE, 140428243824640, 140428243841023,
+STORE, 140428237828096, 140428241625087,
+SNULL, 140428237828096, 140428239486975,
+STORE, 140428239486976, 140428241625087,
+STORE, 140428237828096, 140428239486975,
+SNULL, 140428241584127, 140428241625087,
+STORE, 140428239486976, 140428241584127,
+STORE, 140428241584128, 140428241625087,
+SNULL, 140428241584128, 140428241608703,
+STORE, 140428241608704, 140428241625087,
+STORE, 140428241584128, 140428241608703,
+ERASE, 140428241584128, 140428241608703,
+STORE, 140428241584128, 140428241608703,
+ERASE, 140428241608704, 140428241625087,
+STORE, 140428241608704, 140428241625087,
+STORE, 140428235567104, 140428237828095,
+SNULL, 140428235567104, 140428235718655,
+STORE, 140428235718656, 140428237828095,
+STORE, 140428235567104, 140428235718655,
+SNULL, 140428237811711, 140428237828095,
+STORE, 140428235718656, 140428237811711,
+STORE, 140428237811712, 140428237828095,
+SNULL, 140428237811712, 140428237819903,
+STORE, 140428237819904, 140428237828095,
+STORE, 140428237811712, 140428237819903,
+ERASE, 140428237811712, 140428237819903,
+STORE, 140428237811712, 140428237819903,
+ERASE, 140428237819904, 140428237828095,
+STORE, 140428237819904, 140428237828095,
+STORE, 140428233445376, 140428235567103,
+SNULL, 140428233445376, 140428233461759,
+STORE, 140428233461760, 140428235567103,
+STORE, 140428233445376, 140428233461759,
+SNULL, 140428235558911, 140428235567103,
+STORE, 140428233461760, 140428235558911,
+STORE, 140428235558912, 140428235567103,
+ERASE, 140428235558912, 140428235567103,
+STORE, 140428235558912, 140428235567103,
+STORE, 140428231315456, 140428233445375,
+SNULL, 140428231315456, 140428231344127,
+STORE, 140428231344128, 140428233445375,
+STORE, 140428231315456, 140428231344127,
+SNULL, 140428233437183, 140428233445375,
+STORE, 140428231344128, 140428233437183,
+STORE, 140428233437184, 140428233445375,
+ERASE, 140428233437184, 140428233445375,
+STORE, 140428233437184, 140428233445375,
+STORE, 140428248260608, 140428248268799,
+STORE, 140428229062656, 140428231315455,
+SNULL, 140428229062656, 140428229214207,
+STORE, 140428229214208, 140428231315455,
+STORE, 140428229062656, 140428229214207,
+SNULL, 140428231307263, 140428231315455,
+STORE, 140428229214208, 140428231307263,
+STORE, 140428231307264, 140428231315455,
+ERASE, 140428231307264, 140428231315455,
+STORE, 140428231307264, 140428231315455,
+STORE, 140428226891776, 140428229062655,
+SNULL, 140428226891776, 140428226961407,
+STORE, 140428226961408, 140428229062655,
+STORE, 140428226891776, 140428226961407,
+SNULL, 140428229054463, 140428229062655,
+STORE, 140428226961408, 140428229054463,
+STORE, 140428229054464, 140428229062655,
+ERASE, 140428229054464, 140428229062655,
+STORE, 140428229054464, 140428229062655,
+STORE, 140428223680512, 140428226891775,
+SNULL, 140428223680512, 140428224757759,
+STORE, 140428224757760, 140428226891775,
+STORE, 140428223680512, 140428224757759,
+SNULL, 140428226854911, 140428226891775,
+STORE, 140428224757760, 140428226854911,
+STORE, 140428226854912, 140428226891775,
+ERASE, 140428226854912, 140428226891775,
+STORE, 140428226854912, 140428226891775,
+STORE, 140428221546496, 140428223680511,
+SNULL, 140428221546496, 140428221575167,
+STORE, 140428221575168, 140428223680511,
+STORE, 140428221546496, 140428221575167,
+SNULL, 140428223672319, 140428223680511,
+STORE, 140428221575168, 140428223672319,
+STORE, 140428223672320, 140428223680511,
+ERASE, 140428223672320, 140428223680511,
+STORE, 140428223672320, 140428223680511,
+STORE, 140428219236352, 140428221546495,
+SNULL, 140428219236352, 140428219441151,
+STORE, 140428219441152, 140428221546495,
+STORE, 140428219236352, 140428219441151,
+SNULL, 140428221538303, 140428221546495,
+STORE, 140428219441152, 140428221538303,
+STORE, 140428221538304, 140428221546495,
+ERASE, 140428221538304, 140428221546495,
+STORE, 140428221538304, 140428221546495,
+STORE, 140428216852480, 140428219236351,
+SNULL, 140428216852480, 140428217044991,
+STORE, 140428217044992, 140428219236351,
+STORE, 140428216852480, 140428217044991,
+SNULL, 140428219138047, 140428219236351,
+STORE, 140428217044992, 140428219138047,
+STORE, 140428219138048, 140428219236351,
+ERASE, 140428219138048, 140428219236351,
+STORE, 140428219138048, 140428219236351,
+STORE, 140428248252416, 140428248268799,
+STORE, 140428214284288, 140428216852479,
+SNULL, 140428214284288, 140428214751231,
+STORE, 140428214751232, 140428216852479,
+STORE, 140428214284288, 140428214751231,
+SNULL, 140428216844287, 140428216852479,
+STORE, 140428214751232, 140428216844287,
+STORE, 140428216844288, 140428216852479,
+ERASE, 140428216844288, 140428216852479,
+STORE, 140428216844288, 140428216852479,
+STORE, 140428212170752, 140428214284287,
+SNULL, 140428212170752, 140428212183039,
+STORE, 140428212183040, 140428214284287,
+STORE, 140428212170752, 140428212183039,
+SNULL, 140428214276095, 140428214284287,
+STORE, 140428212183040, 140428214276095,
+STORE, 140428214276096, 140428214284287,
+ERASE, 140428214276096, 140428214284287,
+STORE, 140428214276096, 140428214284287,
+STORE, 140428209991680, 140428212170751,
+SNULL, 140428209991680, 140428210069503,
+STORE, 140428210069504, 140428212170751,
+STORE, 140428209991680, 140428210069503,
+SNULL, 140428212162559, 140428212170751,
+STORE, 140428210069504, 140428212162559,
+STORE, 140428212162560, 140428212170751,
+ERASE, 140428212162560, 140428212170751,
+STORE, 140428212162560, 140428212170751,
+STORE, 140428207874048, 140428209991679,
+SNULL, 140428207874048, 140428207890431,
+STORE, 140428207890432, 140428209991679,
+STORE, 140428207874048, 140428207890431,
+SNULL, 140428209983487, 140428209991679,
+STORE, 140428207890432, 140428209983487,
+STORE, 140428209983488, 140428209991679,
+ERASE, 140428209983488, 140428209991679,
+STORE, 140428209983488, 140428209991679,
+STORE, 140428248244224, 140428248268799,
+STORE, 140428248231936, 140428248268799,
+SNULL, 140428241600511, 140428241608703,
+STORE, 140428241584128, 140428241600511,
+STORE, 140428241600512, 140428241608703,
+SNULL, 140428209987583, 140428209991679,
+STORE, 140428209983488, 140428209987583,
+STORE, 140428209987584, 140428209991679,
+SNULL, 140428212166655, 140428212170751,
+STORE, 140428212162560, 140428212166655,
+STORE, 140428212166656, 140428212170751,
+SNULL, 140428214280191, 140428214284287,
+STORE, 140428214276096, 140428214280191,
+STORE, 140428214280192, 140428214284287,
+SNULL, 140428243820543, 140428243824639,
+STORE, 140428243816448, 140428243820543,
+STORE, 140428243820544, 140428243824639,
+SNULL, 140428216848383, 140428216852479,
+STORE, 140428216844288, 140428216848383,
+STORE, 140428216848384, 140428216852479,
+SNULL, 140428219232255, 140428219236351,
+STORE, 140428219138048, 140428219232255,
+STORE, 140428219232256, 140428219236351,
+SNULL, 140428221542399, 140428221546495,
+STORE, 140428221538304, 140428221542399,
+STORE, 140428221542400, 140428221546495,
+SNULL, 140428223676415, 140428223680511,
+STORE, 140428223672320, 140428223676415,
+STORE, 140428223676416, 140428223680511,
+SNULL, 140428226863103, 140428226891775,
+STORE, 140428226854912, 140428226863103,
+STORE, 140428226863104, 140428226891775,
+SNULL, 140428229058559, 140428229062655,
+STORE, 140428229054464, 140428229058559,
+STORE, 140428229058560, 140428229062655,
+SNULL, 140428231311359, 140428231315455,
+STORE, 140428231307264, 140428231311359,
+STORE, 140428231311360, 140428231315455,
+SNULL, 140428233441279, 140428233445375,
+STORE, 140428233437184, 140428233441279,
+STORE, 140428233441280, 140428233445375,
+SNULL, 140428235563007, 140428235567103,
+STORE, 140428235558912, 140428235563007,
+STORE, 140428235563008, 140428235567103,
+SNULL, 140428237815807, 140428237819903,
+STORE, 140428237811712, 140428237815807,
+STORE, 140428237815808, 140428237819903,
+SNULL, 140428246056959, 140428246061055,
+STORE, 140428245491712, 140428246056959,
+STORE, 140428246056960, 140428246061055,
+SNULL, 94717834924031, 94717834928127,
+STORE, 94717834919936, 94717834924031,
+STORE, 94717834924032, 94717834928127,
+SNULL, 140428248309759, 140428248313855,
+STORE, 140428248305664, 140428248309759,
+STORE, 140428248309760, 140428248313855,
+ERASE, 140428248268800, 140428248297471,
+STORE, 94717843058688, 94717843193855,
+STORE, 94749677137920, 94749677559807,
+STORE, 94749677563904, 94749677604863,
+STORE, 94749677604864, 94749677608959,
+STORE, 94749710970880, 94749711241215,
+STORE, 140490884894720, 140490884935679,
+STORE, 140490884935680, 140490887032831,
+STORE, 140490887032832, 140490887036927,
+STORE, 140490887036928, 140490887041023,
+STORE, 140490887041024, 140490887065599,
+STORE, 140490887065600, 140490887110655,
+STORE, 140490887110656, 140490889203711,
+STORE, 140490889203712, 140490889207807,
+STORE, 140490889207808, 140490889211903,
+STORE, 140490889211904, 140490889293823,
+STORE, 140490889293824, 140490891390975,
+STORE, 140490891390976, 140490891395071,
+STORE, 140490891395072, 140490891399167,
+STORE, 140490891399168, 140490891407359,
+STORE, 140490891407360, 140490891436031,
+STORE, 140490891436032, 140490893529087,
+STORE, 140490893529088, 140490893533183,
+STORE, 140490893533184, 140490893537279,
+STORE, 140490893537280, 140490901979135,
+STORE, 140490901979136, 140490901991423,
+STORE, 140490901991424, 140490904084479,
+STORE, 140490904084480, 140490904088575,
+STORE, 140490904088576, 140490904092671,
+STORE, 140490904092672, 140490904559615,
+STORE, 140490904559616, 140490906652671,
+STORE, 140490906652672, 140490906656767,
+STORE, 140490906656768, 140490906660863,
+STORE, 140490906660864, 140490906677247,
+STORE, 140490906677248, 140490908770303,
+STORE, 140490908770304, 140490908774399,
+STORE, 140490908774400, 140490908778495,
+STORE, 140490908778496, 140490908794879,
+STORE, 140490908794880, 140490910887935,
+STORE, 140490910887936, 140490910892031,
+STORE, 140490910892032, 140490910896127,
+STORE, 140490910896128, 140490912555007,
+STORE, 140490912555008, 140490914652159,
+STORE, 140490914652160, 140490914668543,
+STORE, 140490914668544, 140490914676735,
+STORE, 140490914676736, 140490914693119,
+STORE, 140490914693120, 140490914791423,
+STORE, 140490914791424, 140490916884479,
+STORE, 140490916884480, 140490916888575,
+STORE, 140490916888576, 140490916892671,
+STORE, 140490916892672, 140490916909055,
+STORE, 140490916909056, 140490916937727,
+STORE, 140490916937728, 140490919030783,
+STORE, 140490919030784, 140490919034879,
+STORE, 140490919034880, 140490919038975,
+STORE, 140490919038976, 140490919190527,
+STORE, 140490919190528, 140490921283583,
+STORE, 140490921283584, 140490921287679,
+STORE, 140490921287680, 140490921291775,
+STORE, 140490921291776, 140490921299967,
+STORE, 140490921299968, 140490921390079,
+STORE, 140490921390080, 140490923483135,
+STORE, 140490923483136, 140490923487231,
+STORE, 140490923487232, 140490923491327,
+STORE, 140490923491328, 140490923757567,
+STORE, 140490923757568, 140490925850623,
+STORE, 140490925850624, 140490925867007,
+STORE, 140490925867008, 140490925871103,
+STORE, 140490925871104, 140490925875199,
+STORE, 140490925875200, 140490925903871,
+STORE, 140490925903872, 140490928001023,
+STORE, 140490928001024, 140490928005119,
+STORE, 140490928005120, 140490928009215,
+STORE, 140490928009216, 140490928152575,
+STORE, 140490930184192, 140490930221055,
+STORE, 140490930221056, 140490930237439,
+STORE, 140490930237440, 140490930241535,
+STORE, 140490930241536, 140490930245631,
+STORE, 140490930245632, 140490930249727,
+STORE, 140490930249728, 140490930253823,
+STORE, 140490930253824, 140490930257919,
+STORE, 140490930257920, 140490930262015,
+STORE, 140724611694592, 140724611829759,
+STORE, 140724612427776, 140724612440063,
+STORE, 140724612440064, 140724612444159,
+STORE, 94103163662336, 94103163772927,
+STORE, 94103165865984, 94103165874175,
+STORE, 94103165874176, 94103165878271,
+STORE, 94103165878272, 94103165886463,
+STORE, 94103182548992, 94103182684159,
+STORE, 140092694708224, 140092696367103,
+STORE, 140092696367104, 140092698464255,
+STORE, 140092698464256, 140092698480639,
+STORE, 140092698480640, 140092698488831,
+STORE, 140092698488832, 140092698505215,
+STORE, 140092698505216, 140092698648575,
+STORE, 140092700708864, 140092700717055,
+STORE, 140092700745728, 140092700749823,
+STORE, 140092700749824, 140092700753919,
+STORE, 140092700753920, 140092700758015,
+STORE, 140736800911360, 140736801046527,
+STORE, 140736802308096, 140736802320383,
+STORE, 140736802320384, 140736802324479,
+STORE, 93948802064384, 93948802174975,
+STORE, 93948804268032, 93948804276223,
+STORE, 93948804276224, 93948804280319,
+STORE, 93948804280320, 93948804288511,
+STORE, 93948806266880, 93948806402047,
+STORE, 140222999113728, 140223000772607,
+STORE, 140223000772608, 140223002869759,
+STORE, 140223002869760, 140223002886143,
+STORE, 140223002886144, 140223002894335,
+STORE, 140223002894336, 140223002910719,
+STORE, 140223002910720, 140223003054079,
+STORE, 140223005114368, 140223005122559,
+STORE, 140223005151232, 140223005155327,
+STORE, 140223005155328, 140223005159423,
+STORE, 140223005159424, 140223005163519,
+STORE, 140720877506560, 140720877641727,
+STORE, 140720878231552, 140720878243839,
+STORE, 140720878243840, 140720878247935,
+STORE, 140737488347136, 140737488351231,
+STORE, 140733232087040, 140737488351231,
+SNULL, 140733232091135, 140737488351231,
+STORE, 140733232087040, 140733232091135,
+STORE, 140733231955968, 140733232091135,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 140161681321984, 140161683574783,
+SNULL, 140161681465343, 140161683574783,
+STORE, 140161681321984, 140161681465343,
+STORE, 140161681465344, 140161683574783,
+ERASE, 140161681465344, 140161683574783,
+STORE, 140161683562496, 140161683570687,
+STORE, 140161683570688, 140161683574783,
+STORE, 140733232214016, 140733232218111,
+STORE, 140733232201728, 140733232214015,
+STORE, 140161683533824, 140161683562495,
+STORE, 140161683525632, 140161683533823,
+STORE, 140161678159872, 140161681321983,
+SNULL, 140161678159872, 140161679220735,
+STORE, 140161679220736, 140161681321983,
+STORE, 140161678159872, 140161679220735,
+SNULL, 140161681313791, 140161681321983,
+STORE, 140161679220736, 140161681313791,
+STORE, 140161681313792, 140161681321983,
+ERASE, 140161681313792, 140161681321983,
+STORE, 140161681313792, 140161681321983,
+STORE, 140161674362880, 140161678159871,
+SNULL, 140161674362880, 140161676021759,
+STORE, 140161676021760, 140161678159871,
+STORE, 140161674362880, 140161676021759,
+SNULL, 140161678118911, 140161678159871,
+STORE, 140161676021760, 140161678118911,
+STORE, 140161678118912, 140161678159871,
+SNULL, 140161678118912, 140161678143487,
+STORE, 140161678143488, 140161678159871,
+STORE, 140161678118912, 140161678143487,
+ERASE, 140161678118912, 140161678143487,
+STORE, 140161678118912, 140161678143487,
+ERASE, 140161678143488, 140161678159871,
+STORE, 140161678143488, 140161678159871,
+STORE, 140161683513344, 140161683533823,
+SNULL, 140161678135295, 140161678143487,
+STORE, 140161678118912, 140161678135295,
+STORE, 140161678135296, 140161678143487,
+SNULL, 140161681317887, 140161681321983,
+STORE, 140161681313792, 140161681317887,
+STORE, 140161681317888, 140161681321983,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 140161683566591, 140161683570687,
+STORE, 140161683562496, 140161683566591,
+STORE, 140161683566592, 140161683570687,
+ERASE, 140161683533824, 140161683562495,
+STORE, 25477120, 25612287,
+STORE, 25477120, 25759743,
+STORE, 140161681829888, 140161683513343,
+STORE, 25477120, 25915391,
+STORE, 25477120, 26054655,
+SNULL, 25800703, 26054655,
+STORE, 25477120, 25800703,
+STORE, 25800704, 26054655,
+ERASE, 25800704, 26054655,
+STORE, 140737488347136, 140737488351231,
+STORE, 140723218452480, 140737488351231,
+SNULL, 140723218456575, 140737488351231,
+STORE, 140723218452480, 140723218456575,
+STORE, 140723218321408, 140723218456575,
+STORE, 4194304, 26279935,
+STORE, 28372992, 28454911,
+STORE, 28454912, 29806591,
+STORE, 140398872264704, 140398874517503,
+SNULL, 140398872408063, 140398874517503,
+STORE, 140398872264704, 140398872408063,
+STORE, 140398872408064, 140398874517503,
+ERASE, 140398872408064, 140398874517503,
+STORE, 140398874505216, 140398874513407,
+STORE, 140398874513408, 140398874517503,
+STORE, 140723219247104, 140723219251199,
+STORE, 140723219234816, 140723219247103,
+STORE, 140398874476544, 140398874505215,
+STORE, 140398874468352, 140398874476543,
+STORE, 140398868430848, 140398872264703,
+SNULL, 140398868430848, 140398870138879,
+STORE, 140398870138880, 140398872264703,
+STORE, 140398868430848, 140398870138879,
+SNULL, 140398872231935, 140398872264703,
+STORE, 140398870138880, 140398872231935,
+STORE, 140398872231936, 140398872264703,
+ERASE, 140398872231936, 140398872264703,
+STORE, 140398872231936, 140398872264703,
+STORE, 140398866235392, 140398868430847,
+SNULL, 140398866235392, 140398866329599,
+STORE, 140398866329600, 140398868430847,
+STORE, 140398866235392, 140398866329599,
+SNULL, 140398868422655, 140398868430847,
+STORE, 140398866329600, 140398868422655,
+STORE, 140398868422656, 140398868430847,
+ERASE, 140398868422656, 140398868430847,
+STORE, 140398868422656, 140398868430847,
+STORE, 140398863716352, 140398866235391,
+SNULL, 140398863716352, 140398864130047,
+STORE, 140398864130048, 140398866235391,
+STORE, 140398863716352, 140398864130047,
+SNULL, 140398866223103, 140398866235391,
+STORE, 140398864130048, 140398866223103,
+STORE, 140398866223104, 140398866235391,
+ERASE, 140398866223104, 140398866235391,
+STORE, 140398866223104, 140398866235391,
+STORE, 140398861082624, 140398863716351,
+SNULL, 140398861082624, 140398861611007,
+STORE, 140398861611008, 140398863716351,
+STORE, 140398861082624, 140398861611007,
+SNULL, 140398863708159, 140398863716351,
+STORE, 140398861611008, 140398863708159,
+STORE, 140398863708160, 140398863716351,
+ERASE, 140398863708160, 140398863716351,
+STORE, 140398863708160, 140398863716351,
+STORE, 140398858969088, 140398861082623,
+SNULL, 140398858969088, 140398858981375,
+STORE, 140398858981376, 140398861082623,
+STORE, 140398858969088, 140398858981375,
+SNULL, 140398861074431, 140398861082623,
+STORE, 140398858981376, 140398861074431,
+STORE, 140398861074432, 140398861082623,
+ERASE, 140398861074432, 140398861082623,
+STORE, 140398861074432, 140398861082623,
+STORE, 140398856765440, 140398858969087,
+SNULL, 140398856765440, 140398856867839,
+STORE, 140398856867840, 140398858969087,
+STORE, 140398856765440, 140398856867839,
+SNULL, 140398858960895, 140398858969087,
+STORE, 140398856867840, 140398858960895,
+STORE, 140398858960896, 140398858969087,
+ERASE, 140398858960896, 140398858969087,
+STORE, 140398858960896, 140398858969087,
+STORE, 140398874460160, 140398874476543,
+STORE, 140398853603328, 140398856765439,
+SNULL, 140398853603328, 140398854664191,
+STORE, 140398854664192, 140398856765439,
+STORE, 140398853603328, 140398854664191,
+SNULL, 140398856757247, 140398856765439,
+STORE, 140398854664192, 140398856757247,
+STORE, 140398856757248, 140398856765439,
+ERASE, 140398856757248, 140398856765439,
+STORE, 140398856757248, 140398856765439,
+STORE, 140398849806336, 140398853603327,
+SNULL, 140398849806336, 140398851465215,
+STORE, 140398851465216, 140398853603327,
+STORE, 140398849806336, 140398851465215,
+SNULL, 140398853562367, 140398853603327,
+STORE, 140398851465216, 140398853562367,
+STORE, 140398853562368, 140398853603327,
+SNULL, 140398853562368, 140398853586943,
+STORE, 140398853586944, 140398853603327,
+STORE, 140398853562368, 140398853586943,
+ERASE, 140398853562368, 140398853586943,
+STORE, 140398853562368, 140398853586943,
+ERASE, 140398853586944, 140398853603327,
+STORE, 140398853586944, 140398853603327,
+STORE, 140398874447872, 140398874476543,
+SNULL, 140398853578751, 140398853586943,
+STORE, 140398853562368, 140398853578751,
+STORE, 140398853578752, 140398853586943,
+SNULL, 140398856761343, 140398856765439,
+STORE, 140398856757248, 140398856761343,
+STORE, 140398856761344, 140398856765439,
+SNULL, 140398858964991, 140398858969087,
+STORE, 140398858960896, 140398858964991,
+STORE, 140398858964992, 140398858969087,
+SNULL, 140398861078527, 140398861082623,
+STORE, 140398861074432, 140398861078527,
+STORE, 140398861078528, 140398861082623,
+SNULL, 140398863712255, 140398863716351,
+STORE, 140398863708160, 140398863712255,
+STORE, 140398863712256, 140398863716351,
+SNULL, 140398866231295, 140398866235391,
+STORE, 140398866223104, 140398866231295,
+STORE, 140398866231296, 140398866235391,
+SNULL, 140398868426751, 140398868430847,
+STORE, 140398868422656, 140398868426751,
+STORE, 140398868426752, 140398868430847,
+SNULL, 140398872236031, 140398872264703,
+STORE, 140398872231936, 140398872236031,
+STORE, 140398872236032, 140398872264703,
+SNULL, 28405759, 28454911,
+STORE, 28372992, 28405759,
+STORE, 28405760, 28454911,
+SNULL, 140398874509311, 140398874513407,
+STORE, 140398874505216, 140398874509311,
+STORE, 140398874509312, 140398874513407,
+ERASE, 140398874476544, 140398874505215,
+STORE, 43278336, 43413503,
+STORE, 140398872764416, 140398874447871,
+STORE, 140398874501120, 140398874505215,
+STORE, 140398872629248, 140398872764415,
+STORE, 43278336, 43556863,
+STORE, 140398847709184, 140398849806335,
+STORE, 140398874492928, 140398874505215,
+STORE, 140398874484736, 140398874505215,
+STORE, 140398874447872, 140398874484735,
+STORE, 140398872612864, 140398872764415,
+STORE, 43278336, 43692031,
+STORE, 43278336, 43880447,
+STORE, 140398872604672, 140398872764415,
+STORE, 140398872596480, 140398872764415,
+STORE, 43278336, 44044287,
+STORE, 140398872580096, 140398872764415,
+STORE, 140737488347136, 140737488351231,
+STORE, 140734403092480, 140737488351231,
+SNULL, 140734403096575, 140737488351231,
+STORE, 140734403092480, 140734403096575,
+STORE, 140734402961408, 140734403096575,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 140240662380544, 140240664633343,
+SNULL, 140240662523903, 140240664633343,
+STORE, 140240662380544, 140240662523903,
+STORE, 140240662523904, 140240664633343,
+ERASE, 140240662523904, 140240664633343,
+STORE, 140240664621056, 140240664629247,
+STORE, 140240664629248, 140240664633343,
+STORE, 140734403145728, 140734403149823,
+STORE, 140734403133440, 140734403145727,
+STORE, 140240664592384, 140240664621055,
+STORE, 140240664584192, 140240664592383,
+STORE, 140240659218432, 140240662380543,
+SNULL, 140240659218432, 140240660279295,
+STORE, 140240660279296, 140240662380543,
+STORE, 140240659218432, 140240660279295,
+SNULL, 140240662372351, 140240662380543,
+STORE, 140240660279296, 140240662372351,
+STORE, 140240662372352, 140240662380543,
+ERASE, 140240662372352, 140240662380543,
+STORE, 140240662372352, 140240662380543,
+STORE, 140240655421440, 140240659218431,
+SNULL, 140240655421440, 140240657080319,
+STORE, 140240657080320, 140240659218431,
+STORE, 140240655421440, 140240657080319,
+SNULL, 140240659177471, 140240659218431,
+STORE, 140240657080320, 140240659177471,
+STORE, 140240659177472, 140240659218431,
+SNULL, 140240659177472, 140240659202047,
+STORE, 140240659202048, 140240659218431,
+STORE, 140240659177472, 140240659202047,
+ERASE, 140240659177472, 140240659202047,
+STORE, 140240659177472, 140240659202047,
+ERASE, 140240659202048, 140240659218431,
+STORE, 140240659202048, 140240659218431,
+STORE, 140240664571904, 140240664592383,
+SNULL, 140240659193855, 140240659202047,
+STORE, 140240659177472, 140240659193855,
+STORE, 140240659193856, 140240659202047,
+SNULL, 140240662376447, 140240662380543,
+STORE, 140240662372352, 140240662376447,
+STORE, 140240662376448, 140240662380543,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 140240664625151, 140240664629247,
+STORE, 140240664621056, 140240664625151,
+STORE, 140240664625152, 140240664629247,
+ERASE, 140240664592384, 140240664621055,
+STORE, 30646272, 30781439,
+STORE, 30646272, 30928895,
+STORE, 140240662888448, 140240664571903,
+STORE, 94256659468288, 94256659578879,
+STORE, 94256661671936, 94256661680127,
+STORE, 94256661680128, 94256661684223,
+STORE, 94256661684224, 94256661692415,
+STORE, 94256687980544, 94256688115711,
+STORE, 139801712504832, 139801714163711,
+STORE, 139801714163712, 139801716260863,
+STORE, 139801716260864, 139801716277247,
+STORE, 139801716277248, 139801716285439,
+STORE, 139801716285440, 139801716301823,
+STORE, 139801716301824, 139801716445183,
+STORE, 139801718505472, 139801718513663,
+STORE, 139801718542336, 139801718546431,
+STORE, 139801718546432, 139801718550527,
+STORE, 139801718550528, 139801718554623,
+STORE, 140721575538688, 140721575673855,
+STORE, 140721577013248, 140721577025535,
+STORE, 140721577025536, 140721577029631,
+STORE, 140737488347136, 140737488351231,
+STORE, 140729259393024, 140737488351231,
+SNULL, 140729259397119, 140737488351231,
+STORE, 140729259393024, 140729259397119,
+STORE, 140729259261952, 140729259397119,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 139682376638464, 139682378891263,
+SNULL, 139682376781823, 139682378891263,
+STORE, 139682376638464, 139682376781823,
+STORE, 139682376781824, 139682378891263,
+ERASE, 139682376781824, 139682378891263,
+STORE, 139682378878976, 139682378887167,
+STORE, 139682378887168, 139682378891263,
+STORE, 140729260462080, 140729260466175,
+STORE, 140729260449792, 140729260462079,
+STORE, 139682378850304, 139682378878975,
+STORE, 139682378842112, 139682378850303,
+STORE, 139682373476352, 139682376638463,
+SNULL, 139682373476352, 139682374537215,
+STORE, 139682374537216, 139682376638463,
+STORE, 139682373476352, 139682374537215,
+SNULL, 139682376630271, 139682376638463,
+STORE, 139682374537216, 139682376630271,
+STORE, 139682376630272, 139682376638463,
+ERASE, 139682376630272, 139682376638463,
+STORE, 139682376630272, 139682376638463,
+STORE, 139682369679360, 139682373476351,
+SNULL, 139682369679360, 139682371338239,
+STORE, 139682371338240, 139682373476351,
+STORE, 139682369679360, 139682371338239,
+SNULL, 139682373435391, 139682373476351,
+STORE, 139682371338240, 139682373435391,
+STORE, 139682373435392, 139682373476351,
+SNULL, 139682373435392, 139682373459967,
+STORE, 139682373459968, 139682373476351,
+STORE, 139682373435392, 139682373459967,
+ERASE, 139682373435392, 139682373459967,
+STORE, 139682373435392, 139682373459967,
+ERASE, 139682373459968, 139682373476351,
+STORE, 139682373459968, 139682373476351,
+STORE, 139682378829824, 139682378850303,
+SNULL, 139682373451775, 139682373459967,
+STORE, 139682373435392, 139682373451775,
+STORE, 139682373451776, 139682373459967,
+SNULL, 139682376634367, 139682376638463,
+STORE, 139682376630272, 139682376634367,
+STORE, 139682376634368, 139682376638463,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 139682378883071, 139682378887167,
+STORE, 139682378878976, 139682378883071,
+STORE, 139682378883072, 139682378887167,
+ERASE, 139682378850304, 139682378878975,
+STORE, 10022912, 10158079,
+STORE, 10022912, 10305535,
+STORE, 139682377146368, 139682378829823,
+STORE, 140737488347136, 140737488351231,
+STORE, 140731831926784, 140737488351231,
+SNULL, 140731831930879, 140737488351231,
+STORE, 140731831926784, 140731831930879,
+STORE, 140731831795712, 140731831930879,
+STORE, 94615305261056, 94615307485183,
+SNULL, 94615305371647, 94615307485183,
+STORE, 94615305261056, 94615305371647,
+STORE, 94615305371648, 94615307485183,
+ERASE, 94615305371648, 94615307485183,
+STORE, 94615307464704, 94615307476991,
+STORE, 94615307476992, 94615307485183,
+STORE, 140163912994816, 140163915247615,
+SNULL, 140163913138175, 140163915247615,
+STORE, 140163912994816, 140163913138175,
+STORE, 140163913138176, 140163915247615,
+ERASE, 140163913138176, 140163915247615,
+STORE, 140163915235328, 140163915243519,
+STORE, 140163915243520, 140163915247615,
+STORE, 140731832217600, 140731832221695,
+STORE, 140731832205312, 140731832217599,
+STORE, 140163915206656, 140163915235327,
+STORE, 140163915198464, 140163915206655,
+STORE, 140163909197824, 140163912994815,
+SNULL, 140163909197824, 140163910856703,
+STORE, 140163910856704, 140163912994815,
+STORE, 140163909197824, 140163910856703,
+SNULL, 140163912953855, 140163912994815,
+STORE, 140163910856704, 140163912953855,
+STORE, 140163912953856, 140163912994815,
+SNULL, 140163912953856, 140163912978431,
+STORE, 140163912978432, 140163912994815,
+STORE, 140163912953856, 140163912978431,
+ERASE, 140163912953856, 140163912978431,
+STORE, 140163912953856, 140163912978431,
+ERASE, 140163912978432, 140163912994815,
+STORE, 140163912978432, 140163912994815,
+SNULL, 140163912970239, 140163912978431,
+STORE, 140163912953856, 140163912970239,
+STORE, 140163912970240, 140163912978431,
+SNULL, 94615307472895, 94615307476991,
+STORE, 94615307464704, 94615307472895,
+STORE, 94615307472896, 94615307476991,
+SNULL, 140163915239423, 140163915243519,
+STORE, 140163915235328, 140163915239423,
+STORE, 140163915239424, 140163915243519,
+ERASE, 140163915206656, 140163915235327,
+STORE, 94615330672640, 94615330807807,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140725254479872, 140737488351231,
+SNULL, 140725254488063, 140737488351231,
+STORE, 140725254479872, 140725254488063,
+STORE, 140725254348800, 140725254488063,
+STORE, 94572781277184, 94572785741823,
+SNULL, 94572783312895, 94572785741823,
+STORE, 94572781277184, 94572783312895,
+STORE, 94572783312896, 94572785741823,
+ERASE, 94572783312896, 94572785741823,
+STORE, 94572785405952, 94572785455103,
+STORE, 94572785455104, 94572785741823,
+STORE, 139636001341440, 139636003594239,
+SNULL, 139636001484799, 139636003594239,
+STORE, 139636001341440, 139636001484799,
+STORE, 139636001484800, 139636003594239,
+ERASE, 139636001484800, 139636003594239,
+STORE, 139636003581952, 139636003590143,
+STORE, 139636003590144, 139636003594239,
+STORE, 140725255557120, 140725255561215,
+STORE, 140725255544832, 140725255557119,
+STORE, 139636003553280, 139636003581951,
+STORE, 139636003545088, 139636003553279,
+STORE, 139635998773248, 139636001341439,
+SNULL, 139635998773248, 139635999240191,
+STORE, 139635999240192, 139636001341439,
+STORE, 139635998773248, 139635999240191,
+SNULL, 139636001333247, 139636001341439,
+STORE, 139635999240192, 139636001333247,
+STORE, 139636001333248, 139636001341439,
+ERASE, 139636001333248, 139636001341439,
+STORE, 139636001333248, 139636001341439,
+STORE, 139635996569600, 139635998773247,
+SNULL, 139635996569600, 139635996671999,
+STORE, 139635996672000, 139635998773247,
+STORE, 139635996569600, 139635996671999,
+SNULL, 139635998765055, 139635998773247,
+STORE, 139635996672000, 139635998765055,
+STORE, 139635998765056, 139635998773247,
+ERASE, 139635998765056, 139635998773247,
+STORE, 139635998765056, 139635998773247,
+STORE, 139635994353664, 139635996569599,
+SNULL, 139635994353664, 139635994451967,
+STORE, 139635994451968, 139635996569599,
+STORE, 139635994353664, 139635994451967,
+SNULL, 139635996545023, 139635996569599,
+STORE, 139635994451968, 139635996545023,
+STORE, 139635996545024, 139635996569599,
+SNULL, 139635996545024, 139635996553215,
+STORE, 139635996553216, 139635996569599,
+STORE, 139635996545024, 139635996553215,
+ERASE, 139635996545024, 139635996553215,
+STORE, 139635996545024, 139635996553215,
+ERASE, 139635996553216, 139635996569599,
+STORE, 139635996553216, 139635996569599,
+STORE, 139635992223744, 139635994353663,
+SNULL, 139635992223744, 139635992252415,
+STORE, 139635992252416, 139635994353663,
+STORE, 139635992223744, 139635992252415,
+SNULL, 139635994345471, 139635994353663,
+STORE, 139635992252416, 139635994345471,
+STORE, 139635994345472, 139635994353663,
+ERASE, 139635994345472, 139635994353663,
+STORE, 139635994345472, 139635994353663,
+STORE, 139635988426752, 139635992223743,
+SNULL, 139635988426752, 139635990085631,
+STORE, 139635990085632, 139635992223743,
+STORE, 139635988426752, 139635990085631,
+SNULL, 139635992182783, 139635992223743,
+STORE, 139635990085632, 139635992182783,
+STORE, 139635992182784, 139635992223743,
+SNULL, 139635992182784, 139635992207359,
+STORE, 139635992207360, 139635992223743,
+STORE, 139635992182784, 139635992207359,
+ERASE, 139635992182784, 139635992207359,
+STORE, 139635992182784, 139635992207359,
+ERASE, 139635992207360, 139635992223743,
+STORE, 139635992207360, 139635992223743,
+STORE, 139636003536896, 139636003553279,
+SNULL, 139635992199167, 139635992207359,
+STORE, 139635992182784, 139635992199167,
+STORE, 139635992199168, 139635992207359,
+SNULL, 139635996549119, 139635996553215,
+STORE, 139635996545024, 139635996549119,
+STORE, 139635996549120, 139635996553215,
+SNULL, 139635994349567, 139635994353663,
+STORE, 139635994345472, 139635994349567,
+STORE, 139635994349568, 139635994353663,
+SNULL, 139635998769151, 139635998773247,
+STORE, 139635998765056, 139635998769151,
+STORE, 139635998769152, 139635998773247,
+SNULL, 139636001337343, 139636001341439,
+STORE, 139636001333248, 139636001337343,
+STORE, 139636001337344, 139636001341439,
+SNULL, 94572785418239, 94572785455103,
+STORE, 94572785405952, 94572785418239,
+STORE, 94572785418240, 94572785455103,
+SNULL, 139636003586047, 139636003590143,
+STORE, 139636003581952, 139636003586047,
+STORE, 139636003586048, 139636003590143,
+ERASE, 139636003553280, 139636003581951,
+STORE, 94572798435328, 94572798570495,
+STORE, 139636001853440, 139636003536895,
+STORE, 139635981426688, 139635988426751,
+STORE, 139635980615680, 139635981426687,
+STORE, 94572798435328, 94572798705663,
+STORE, 94572798435328, 94572798840831,
+STORE, 94572798435328, 94572798975999,
+STORE, 94572798435328, 94572799111167,
+STORE, 94572798435328, 94572799246335,
+STORE, 94572798435328, 94572799381503,
+STORE, 94572798435328, 94572799516671,
+STORE, 94572798435328, 94572799651839,
+STORE, 94572798435328, 94572799787007,
+STORE, 94572798435328, 94572799922175,
+STORE, 94572798435328, 94572800057343,
+STORE, 94572798435328, 94572800192511,
+STORE, 94572798435328, 94572800327679,
+STORE, 94572798435328, 94572800462847,
+STORE, 94572798435328, 94572800598015,
+STORE, 94572798435328, 94572800733183,
+STORE, 94572798435328, 94572800868351,
+STORE, 94572798435328, 94572801003519,
+STORE, 94572798435328, 94572801138687,
+STORE, 94572798435328, 94572801273855,
+STORE, 94572798435328, 94572801409023,
+STORE, 94572798435328, 94572801544191,
+STORE, 94572798435328, 94572801679359,
+STORE, 94572798435328, 94572801814527,
+STORE, 94572798435328, 94572801949695,
+STORE, 94572798435328, 94572802084863,
+STORE, 94572798435328, 94572802220031,
+STORE, 94572798435328, 94572802355199,
+STORE, 94572798435328, 94572802490367,
+STORE, 94572798435328, 94572802625535,
+STORE, 94572798435328, 94572802760703,
+STORE, 94572798435328, 94572802895871,
+STORE, 94572798435328, 94572803031039,
+STORE, 94572798435328, 94572803166207,
+STORE, 94572798435328, 94572803301375,
+STORE, 94572798435328, 94572803436543,
+STORE, 94572798435328, 94572803571711,
+STORE, 94572798435328, 94572803706879,
+STORE, 94572798435328, 94572803842047,
+STORE, 94572798435328, 94572803977215,
+STORE, 94572798435328, 94572804112383,
+STORE, 94572798435328, 94572804247551,
+STORE, 94572798435328, 94572804382719,
+STORE, 94572798435328, 94572804517887,
+STORE, 94572798435328, 94572804653055,
+STORE, 94572798435328, 94572804788223,
+STORE, 94572798435328, 94572804923391,
+STORE, 94572798435328, 94572805058559,
+STORE, 94572798435328, 94572805193727,
+STORE, 94572798435328, 94572805328895,
+STORE, 94572798435328, 94572805464063,
+STORE, 94572798435328, 94572805599231,
+STORE, 94572798435328, 94572805734399,
+STORE, 94572798435328, 94572805869567,
+STORE, 94572798435328, 94572806004735,
+STORE, 94572798435328, 94572806139903,
+STORE, 94572798435328, 94572806275071,
+STORE, 94572798435328, 94572806410239,
+STORE, 94572798435328, 94572806545407,
+STORE, 94572798435328, 94572806680575,
+STORE, 94572798435328, 94572806815743,
+STORE, 94572798435328, 94572806950911,
+STORE, 94572798435328, 94572807086079,
+STORE, 94572798435328, 94572807221247,
+STORE, 94572798435328, 94572807356415,
+STORE, 94572798435328, 94572807491583,
+STORE, 94572798435328, 94572807626751,
+STORE, 94572798435328, 94572807761919,
+STORE, 94572798435328, 94572807897087,
+STORE, 94572798435328, 94572808032255,
+STORE, 94572798435328, 94572808167423,
+STORE, 94572798435328, 94572808302591,
+STORE, 94572798435328, 94572808437759,
+STORE, 94572798435328, 94572808572927,
+ERASE, 139635981426688, 139635988426751,
+STORE, 139635985088512, 139635988426751,
+STORE, 139635778273280, 139635980615679,
+STORE, 139635567632384, 139635778273279,
+STORE, 94572798435328, 94572808716287,
+STORE, 139635984564224, 139635985088511,
+STORE, 139635559239680, 139635567632383,
+SNULL, 139635559243775, 139635567632383,
+STORE, 139635559239680, 139635559243775,
+STORE, 139635559243776, 139635567632383,
+STORE, 139635550846976, 139635559239679,
+SNULL, 139635550851071, 139635559239679,
+STORE, 139635550846976, 139635550851071,
+STORE, 139635550851072, 139635559239679,
+STORE, 139635542454272, 139635550846975,
+STORE, 139635408236544, 139635542454271,
+SNULL, 139635408236544, 139635426590719,
+STORE, 139635426590720, 139635542454271,
+STORE, 139635408236544, 139635426590719,
+ERASE, 139635408236544, 139635426590719,
+STORE, 139635292372992, 139635542454271,
+SNULL, 139635359481855, 139635542454271,
+STORE, 139635292372992, 139635359481855,
+STORE, 139635359481856, 139635542454271,
+SNULL, 139635359481856, 139635426590719,
+STORE, 139635426590720, 139635542454271,
+STORE, 139635359481856, 139635426590719,
+ERASE, 139635359481856, 139635426590719,
+SNULL, 139635542458367, 139635550846975,
+STORE, 139635542454272, 139635542458367,
+STORE, 139635542458368, 139635550846975,
+STORE, 139635418198016, 139635426590719,
+SNULL, 139635493699583, 139635542454271,
+STORE, 139635426590720, 139635493699583,
+STORE, 139635493699584, 139635542454271,
+ERASE, 139635493699584, 139635542454271,
+SNULL, 139635426725887, 139635493699583,
+STORE, 139635426590720, 139635426725887,
+STORE, 139635426725888, 139635493699583,
+SNULL, 139635292508159, 139635359481855,
+STORE, 139635292372992, 139635292508159,
+STORE, 139635292508160, 139635359481855,
+SNULL, 139635418202111, 139635426590719,
+STORE, 139635418198016, 139635418202111,
+STORE, 139635418202112, 139635426590719,
+STORE, 139635225264128, 139635292372991,
+STORE, 139635534061568, 139635542454271,
+SNULL, 139635534065663, 139635542454271,
+STORE, 139635534061568, 139635534065663,
+STORE, 139635534065664, 139635542454271,
+STORE, 139635525668864, 139635534061567,
+SNULL, 139635525672959, 139635534061567,
+STORE, 139635525668864, 139635525672959,
+STORE, 139635525672960, 139635534061567,
+SNULL, 139635225399295, 139635292372991,
+STORE, 139635225264128, 139635225399295,
+STORE, 139635225399296, 139635292372991,
+STORE, 139635091046400, 139635225264127,
+SNULL, 139635158155263, 139635225264127,
+STORE, 139635091046400, 139635158155263,
+STORE, 139635158155264, 139635225264127,
+ERASE, 139635158155264, 139635225264127,
+STORE, 139634956828672, 139635158155263,
+STORE, 139635517276160, 139635525668863,
+SNULL, 139635517280255, 139635525668863,
+STORE, 139635517276160, 139635517280255,
+STORE, 139635517280256, 139635525668863,
+SNULL, 139634956828672, 139635091046399,
+STORE, 139635091046400, 139635158155263,
+STORE, 139634956828672, 139635091046399,
+SNULL, 139635091181567, 139635158155263,
+STORE, 139635091046400, 139635091181567,
+STORE, 139635091181568, 139635158155263,
+SNULL, 139635023937535, 139635091046399,
+STORE, 139634956828672, 139635023937535,
+STORE, 139635023937536, 139635091046399,
+ERASE, 139635023937536, 139635091046399,
+STORE, 139634956828672, 139635091046399,
+SNULL, 139634956828672, 139635023937535,
+STORE, 139635023937536, 139635091046399,
+STORE, 139634956828672, 139635023937535,
+SNULL, 139635024072703, 139635091046399,
+STORE, 139635023937536, 139635024072703,
+STORE, 139635024072704, 139635091046399,
+STORE, 139635508883456, 139635517276159,
+SNULL, 139635508887551, 139635517276159,
+STORE, 139635508883456, 139635508887551,
+STORE, 139635508887552, 139635517276159,
+STORE, 139634822610944, 139635023937535,
+SNULL, 139634822610944, 139634956828671,
+STORE, 139634956828672, 139635023937535,
+STORE, 139634822610944, 139634956828671,
+SNULL, 139634956963839, 139635023937535,
+STORE, 139634956828672, 139634956963839,
+STORE, 139634956963840, 139635023937535,
+STORE, 139635500490752, 139635508883455,
+SNULL, 139634889719807, 139634956828671,
+STORE, 139634822610944, 139634889719807,
+STORE, 139634889719808, 139634956828671,
+ERASE, 139634889719808, 139634956828671,
+SNULL, 139635500494847, 139635508883455,
+STORE, 139635500490752, 139635500494847,
+STORE, 139635500494848, 139635508883455,
+SNULL, 139634822746111, 139634889719807,
+STORE, 139634822610944, 139634822746111,
+STORE, 139634822746112, 139634889719807,
+STORE, 139635409805312, 139635418198015,
+STORE, 139634822746112, 139634956828671,
+SNULL, 139634822746112, 139634889719807,
+STORE, 139634889719808, 139634956828671,
+STORE, 139634822746112, 139634889719807,
+SNULL, 139634889854975, 139634956828671,
+STORE, 139634889719808, 139634889854975,
+STORE, 139634889854976, 139634956828671,
+SNULL, 139635409809407, 139635418198015,
+STORE, 139635409805312, 139635409809407,
+STORE, 139635409809408, 139635418198015,
+STORE, 139635401412608, 139635409805311,
+STORE, 139634688393216, 139634822610943,
+SNULL, 139634755502079, 139634822610943,
+STORE, 139634688393216, 139634755502079,
+STORE, 139634755502080, 139634822610943,
+ERASE, 139634755502080, 139634822610943,
+SNULL, 139635401416703, 139635409805311,
+STORE, 139635401412608, 139635401416703,
+STORE, 139635401416704, 139635409805311,
+STORE, 139634554175488, 139634755502079,
+SNULL, 139634554175488, 139634688393215,
+STORE, 139634688393216, 139634755502079,
+STORE, 139634554175488, 139634688393215,
+SNULL, 139634688528383, 139634755502079,
+STORE, 139634688393216, 139634688528383,
+STORE, 139634688528384, 139634755502079,
+STORE, 139635393019904, 139635401412607,
+SNULL, 139634621284351, 139634688393215,
+STORE, 139634554175488, 139634621284351,
+STORE, 139634621284352, 139634688393215,
+ERASE, 139634621284352, 139634688393215,
+SNULL, 139634554310655, 139634621284351,
+STORE, 139634554175488, 139634554310655,
+STORE, 139634554310656, 139634621284351,
+STORE, 139634554310656, 139634688393215,
+SNULL, 139635393023999, 139635401412607,
+STORE, 139635393019904, 139635393023999,
+STORE, 139635393024000, 139635401412607,
+SNULL, 139634554310656, 139634621284351,
+STORE, 139634621284352, 139634688393215,
+STORE, 139634554310656, 139634621284351,
+SNULL, 139634621419519, 139634688393215,
+STORE, 139634621284352, 139634621419519,
+STORE, 139634621419520, 139634688393215,
+STORE, 139635384627200, 139635393019903,
+SNULL, 139635384631295, 139635393019903,
+STORE, 139635384627200, 139635384631295,
+STORE, 139635384631296, 139635393019903,
+STORE, 139635376234496, 139635384627199,
+SNULL, 139635376238591, 139635384627199,
+STORE, 139635376234496, 139635376238591,
+STORE, 139635376238592, 139635384627199,
+STORE, 139635367841792, 139635376234495,
+SNULL, 139635367845887, 139635376234495,
+STORE, 139635367841792, 139635367845887,
+STORE, 139635367845888, 139635376234495,
+STORE, 139634419957760, 139634554175487,
+SNULL, 139634487066623, 139634554175487,
+STORE, 139634419957760, 139634487066623,
+STORE, 139634487066624, 139634554175487,
+ERASE, 139634487066624, 139634554175487,
+STORE, 139635216871424, 139635225264127,
+SNULL, 139635216875519, 139635225264127,
+STORE, 139635216871424, 139635216875519,
+STORE, 139635216875520, 139635225264127,
+SNULL, 139634420092927, 139634487066623,
+STORE, 139634419957760, 139634420092927,
+STORE, 139634420092928, 139634487066623,
+STORE, 139635208478720, 139635216871423,
+SNULL, 139635208482815, 139635216871423,
+STORE, 139635208478720, 139635208482815,
+STORE, 139635208482816, 139635216871423,
+STORE, 139635200086016, 139635208478719,
+SNULL, 139635200090111, 139635208478719,
+STORE, 139635200086016, 139635200090111,
+STORE, 139635200090112, 139635208478719,
+STORE, 139635191693312, 139635200086015,
+SNULL, 139635191697407, 139635200086015,
+STORE, 139635191693312, 139635191697407,
+STORE, 139635191697408, 139635200086015,
+STORE, 139635183300608, 139635191693311,
+SNULL, 139635183304703, 139635191693311,
+STORE, 139635183300608, 139635183304703,
+STORE, 139635183304704, 139635191693311,
+STORE, 139634420092928, 139634554175487,
+SNULL, 139634420092928, 139634487066623,
+STORE, 139634487066624, 139634554175487,
+STORE, 139634420092928, 139634487066623,
+SNULL, 139634487201791, 139634554175487,
+STORE, 139634487066624, 139634487201791,
+STORE, 139634487201792, 139634554175487,
+ERASE, 139635559239680, 139635559243775,
+ERASE, 139635559243776, 139635567632383,
+ERASE, 139635550846976, 139635550851071,
+ERASE, 139635550851072, 139635559239679,
+ERASE, 139635542454272, 139635542458367,
+ERASE, 139635542458368, 139635550846975,
+ERASE, 139635418198016, 139635418202111,
+ERASE, 139635418202112, 139635426590719,
+ERASE, 139635534061568, 139635534065663,
+ERASE, 139635534065664, 139635542454271,
+ERASE, 139635525668864, 139635525672959,
+ERASE, 139635525672960, 139635534061567,
+ERASE, 139635517276160, 139635517280255,
+ERASE, 139635517280256, 139635525668863,
+ERASE, 139635508883456, 139635508887551,
+ERASE, 139635508887552, 139635517276159,
+ERASE, 139635500490752, 139635500494847,
+ERASE, 139635500494848, 139635508883455,
+ERASE, 139635409805312, 139635409809407,
+ERASE, 139635409809408, 139635418198015,
+ERASE, 139635401412608, 139635401416703,
+ERASE, 139635401416704, 139635409805311,
+ERASE, 139635393019904, 139635393023999,
+ERASE, 139635393024000, 139635401412607,
+ERASE, 139635384627200, 139635384631295,
+ERASE, 139635384631296, 139635393019903,
+ };
+ unsigned long set25[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722547441664, 140737488351231,
+SNULL, 140722547449855, 140737488351231,
+STORE, 140722547441664, 140722547449855,
+STORE, 140722547310592, 140722547449855,
+STORE, 94827521732608, 94827523956735,
+SNULL, 94827521843199, 94827523956735,
+STORE, 94827521732608, 94827521843199,
+STORE, 94827521843200, 94827523956735,
+ERASE, 94827521843200, 94827523956735,
+STORE, 94827523936256, 94827523948543,
+STORE, 94827523948544, 94827523956735,
+STORE, 139816136847360, 139816139100159,
+SNULL, 139816136990719, 139816139100159,
+STORE, 139816136847360, 139816136990719,
+STORE, 139816136990720, 139816139100159,
+ERASE, 139816136990720, 139816139100159,
+STORE, 139816139087872, 139816139096063,
+STORE, 139816139096064, 139816139100159,
+STORE, 140722548142080, 140722548146175,
+STORE, 140722548129792, 140722548142079,
+STORE, 139816139059200, 139816139087871,
+STORE, 139816139051008, 139816139059199,
+STORE, 139816133050368, 139816136847359,
+SNULL, 139816133050368, 139816134709247,
+STORE, 139816134709248, 139816136847359,
+STORE, 139816133050368, 139816134709247,
+SNULL, 139816136806399, 139816136847359,
+STORE, 139816134709248, 139816136806399,
+STORE, 139816136806400, 139816136847359,
+SNULL, 139816136806400, 139816136830975,
+STORE, 139816136830976, 139816136847359,
+STORE, 139816136806400, 139816136830975,
+ERASE, 139816136806400, 139816136830975,
+STORE, 139816136806400, 139816136830975,
+ERASE, 139816136830976, 139816136847359,
+STORE, 139816136830976, 139816136847359,
+SNULL, 139816136822783, 139816136830975,
+STORE, 139816136806400, 139816136822783,
+STORE, 139816136822784, 139816136830975,
+SNULL, 94827523944447, 94827523948543,
+STORE, 94827523936256, 94827523944447,
+STORE, 94827523944448, 94827523948543,
+SNULL, 139816139091967, 139816139096063,
+STORE, 139816139087872, 139816139091967,
+STORE, 139816139091968, 139816139096063,
+ERASE, 139816139059200, 139816139087871,
+STORE, 94827534970880, 94827535106047,
+STORE, 94114394132480, 94114394345471,
+STORE, 94114396442624, 94114396446719,
+STORE, 94114396446720, 94114396454911,
+STORE, 94114396454912, 94114396467199,
+STORE, 94114421575680, 94114427715583,
+STORE, 139934313955328, 139934315614207,
+STORE, 139934315614208, 139934317711359,
+STORE, 139934317711360, 139934317727743,
+STORE, 139934317727744, 139934317735935,
+STORE, 139934317735936, 139934317752319,
+STORE, 139934317752320, 139934317764607,
+STORE, 139934317764608, 139934319857663,
+STORE, 139934319857664, 139934319861759,
+STORE, 139934319861760, 139934319865855,
+STORE, 139934319865856, 139934320009215,
+STORE, 139934320377856, 139934322061311,
+STORE, 139934322061312, 139934322077695,
+STORE, 139934322106368, 139934322110463,
+STORE, 139934322110464, 139934322114559,
+STORE, 139934322114560, 139934322118655,
+STORE, 140731200376832, 140731200516095,
+STORE, 140731200929792, 140731200942079,
+STORE, 140731200942080, 140731200946175,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140734133174272, 140737488351231,
+SNULL, 140734133182463, 140737488351231,
+STORE, 140734133174272, 140734133182463,
+STORE, 140734133043200, 140734133182463,
+STORE, 94412675600384, 94412677824511,
+SNULL, 94412675710975, 94412677824511,
+STORE, 94412675600384, 94412675710975,
+STORE, 94412675710976, 94412677824511,
+ERASE, 94412675710976, 94412677824511,
+STORE, 94412677804032, 94412677816319,
+STORE, 94412677816320, 94412677824511,
+STORE, 140320087945216, 140320090198015,
+SNULL, 140320088088575, 140320090198015,
+STORE, 140320087945216, 140320088088575,
+STORE, 140320088088576, 140320090198015,
+ERASE, 140320088088576, 140320090198015,
+STORE, 140320090185728, 140320090193919,
+STORE, 140320090193920, 140320090198015,
+STORE, 140734134591488, 140734134595583,
+STORE, 140734134579200, 140734134591487,
+STORE, 140320090157056, 140320090185727,
+STORE, 140320090148864, 140320090157055,
+STORE, 140320084148224, 140320087945215,
+SNULL, 140320084148224, 140320085807103,
+STORE, 140320085807104, 140320087945215,
+STORE, 140320084148224, 140320085807103,
+SNULL, 140320087904255, 140320087945215,
+STORE, 140320085807104, 140320087904255,
+STORE, 140320087904256, 140320087945215,
+SNULL, 140320087904256, 140320087928831,
+STORE, 140320087928832, 140320087945215,
+STORE, 140320087904256, 140320087928831,
+ERASE, 140320087904256, 140320087928831,
+STORE, 140320087904256, 140320087928831,
+ERASE, 140320087928832, 140320087945215,
+STORE, 140320087928832, 140320087945215,
+SNULL, 140320087920639, 140320087928831,
+STORE, 140320087904256, 140320087920639,
+STORE, 140320087920640, 140320087928831,
+SNULL, 94412677812223, 94412677816319,
+STORE, 94412677804032, 94412677812223,
+STORE, 94412677812224, 94412677816319,
+SNULL, 140320090189823, 140320090193919,
+STORE, 140320090185728, 140320090189823,
+STORE, 140320090189824, 140320090193919,
+ERASE, 140320090157056, 140320090185727,
+STORE, 94412684546048, 94412684681215,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723005485056, 140737488351231,
+SNULL, 140723005493247, 140737488351231,
+STORE, 140723005485056, 140723005493247,
+STORE, 140723005353984, 140723005493247,
+STORE, 94387431936000, 94387434160127,
+SNULL, 94387432046591, 94387434160127,
+STORE, 94387431936000, 94387432046591,
+STORE, 94387432046592, 94387434160127,
+ERASE, 94387432046592, 94387434160127,
+STORE, 94387434139648, 94387434151935,
+STORE, 94387434151936, 94387434160127,
+STORE, 140151675392000, 140151677644799,
+SNULL, 140151675535359, 140151677644799,
+STORE, 140151675392000, 140151675535359,
+STORE, 140151675535360, 140151677644799,
+ERASE, 140151675535360, 140151677644799,
+STORE, 140151677632512, 140151677640703,
+STORE, 140151677640704, 140151677644799,
+STORE, 140723005784064, 140723005788159,
+STORE, 140723005771776, 140723005784063,
+STORE, 140151677603840, 140151677632511,
+STORE, 140151677595648, 140151677603839,
+STORE, 140151671595008, 140151675391999,
+SNULL, 140151671595008, 140151673253887,
+STORE, 140151673253888, 140151675391999,
+STORE, 140151671595008, 140151673253887,
+SNULL, 140151675351039, 140151675391999,
+STORE, 140151673253888, 140151675351039,
+STORE, 140151675351040, 140151675391999,
+SNULL, 140151675351040, 140151675375615,
+STORE, 140151675375616, 140151675391999,
+STORE, 140151675351040, 140151675375615,
+ERASE, 140151675351040, 140151675375615,
+STORE, 140151675351040, 140151675375615,
+ERASE, 140151675375616, 140151675391999,
+STORE, 140151675375616, 140151675391999,
+SNULL, 140151675367423, 140151675375615,
+STORE, 140151675351040, 140151675367423,
+STORE, 140151675367424, 140151675375615,
+SNULL, 94387434147839, 94387434151935,
+STORE, 94387434139648, 94387434147839,
+STORE, 94387434147840, 94387434151935,
+SNULL, 140151677636607, 140151677640703,
+STORE, 140151677632512, 140151677636607,
+STORE, 140151677636608, 140151677640703,
+ERASE, 140151677603840, 140151677632511,
+STORE, 94387458818048, 94387458953215,
+STORE, 94909010997248, 94909011210239,
+STORE, 94909013307392, 94909013311487,
+STORE, 94909013311488, 94909013319679,
+STORE, 94909013319680, 94909013331967,
+STORE, 94909014827008, 94909023371263,
+STORE, 140712411975680, 140712413634559,
+STORE, 140712413634560, 140712415731711,
+STORE, 140712415731712, 140712415748095,
+STORE, 140712415748096, 140712415756287,
+STORE, 140712415756288, 140712415772671,
+STORE, 140712415772672, 140712415784959,
+STORE, 140712415784960, 140712417878015,
+STORE, 140712417878016, 140712417882111,
+STORE, 140712417882112, 140712417886207,
+STORE, 140712417886208, 140712418029567,
+STORE, 140712418398208, 140712420081663,
+STORE, 140712420081664, 140712420098047,
+STORE, 140712420126720, 140712420130815,
+STORE, 140712420130816, 140712420134911,
+STORE, 140712420134912, 140712420139007,
+STORE, 140729293111296, 140729293250559,
+STORE, 140729293307904, 140729293320191,
+STORE, 140729293320192, 140729293324287,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140720541691904, 140737488351231,
+SNULL, 140720541700095, 140737488351231,
+STORE, 140720541691904, 140720541700095,
+STORE, 140720541560832, 140720541700095,
+STORE, 94203603419136, 94203605643263,
+SNULL, 94203603529727, 94203605643263,
+STORE, 94203603419136, 94203603529727,
+STORE, 94203603529728, 94203605643263,
+ERASE, 94203603529728, 94203605643263,
+STORE, 94203605622784, 94203605635071,
+STORE, 94203605635072, 94203605643263,
+STORE, 139847623081984, 139847625334783,
+SNULL, 139847623225343, 139847625334783,
+STORE, 139847623081984, 139847623225343,
+STORE, 139847623225344, 139847625334783,
+ERASE, 139847623225344, 139847625334783,
+STORE, 139847625322496, 139847625330687,
+STORE, 139847625330688, 139847625334783,
+STORE, 140720542547968, 140720542552063,
+STORE, 140720542535680, 140720542547967,
+STORE, 139847625293824, 139847625322495,
+STORE, 139847625285632, 139847625293823,
+STORE, 139847619284992, 139847623081983,
+SNULL, 139847619284992, 139847620943871,
+STORE, 139847620943872, 139847623081983,
+STORE, 139847619284992, 139847620943871,
+SNULL, 139847623041023, 139847623081983,
+STORE, 139847620943872, 139847623041023,
+STORE, 139847623041024, 139847623081983,
+SNULL, 139847623041024, 139847623065599,
+STORE, 139847623065600, 139847623081983,
+STORE, 139847623041024, 139847623065599,
+ERASE, 139847623041024, 139847623065599,
+STORE, 139847623041024, 139847623065599,
+ERASE, 139847623065600, 139847623081983,
+STORE, 139847623065600, 139847623081983,
+SNULL, 139847623057407, 139847623065599,
+STORE, 139847623041024, 139847623057407,
+STORE, 139847623057408, 139847623065599,
+SNULL, 94203605630975, 94203605635071,
+STORE, 94203605622784, 94203605630975,
+STORE, 94203605630976, 94203605635071,
+SNULL, 139847625326591, 139847625330687,
+STORE, 139847625322496, 139847625326591,
+STORE, 139847625326592, 139847625330687,
+ERASE, 139847625293824, 139847625322495,
+STORE, 94203634880512, 94203635015679,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140721428738048, 140737488351231,
+SNULL, 140721428746239, 140737488351231,
+STORE, 140721428738048, 140721428746239,
+STORE, 140721428606976, 140721428746239,
+STORE, 93968808378368, 93968810602495,
+SNULL, 93968808488959, 93968810602495,
+STORE, 93968808378368, 93968808488959,
+STORE, 93968808488960, 93968810602495,
+ERASE, 93968808488960, 93968810602495,
+STORE, 93968810582016, 93968810594303,
+STORE, 93968810594304, 93968810602495,
+STORE, 140397757026304, 140397759279103,
+SNULL, 140397757169663, 140397759279103,
+STORE, 140397757026304, 140397757169663,
+STORE, 140397757169664, 140397759279103,
+ERASE, 140397757169664, 140397759279103,
+STORE, 140397759266816, 140397759275007,
+STORE, 140397759275008, 140397759279103,
+STORE, 140721430368256, 140721430372351,
+STORE, 140721430355968, 140721430368255,
+STORE, 140397759238144, 140397759266815,
+STORE, 140397759229952, 140397759238143,
+STORE, 140397753229312, 140397757026303,
+SNULL, 140397753229312, 140397754888191,
+STORE, 140397754888192, 140397757026303,
+STORE, 140397753229312, 140397754888191,
+SNULL, 140397756985343, 140397757026303,
+STORE, 140397754888192, 140397756985343,
+STORE, 140397756985344, 140397757026303,
+SNULL, 140397756985344, 140397757009919,
+STORE, 140397757009920, 140397757026303,
+STORE, 140397756985344, 140397757009919,
+ERASE, 140397756985344, 140397757009919,
+STORE, 140397756985344, 140397757009919,
+ERASE, 140397757009920, 140397757026303,
+STORE, 140397757009920, 140397757026303,
+SNULL, 140397757001727, 140397757009919,
+STORE, 140397756985344, 140397757001727,
+STORE, 140397757001728, 140397757009919,
+SNULL, 93968810590207, 93968810594303,
+STORE, 93968810582016, 93968810590207,
+STORE, 93968810590208, 93968810594303,
+SNULL, 140397759270911, 140397759275007,
+STORE, 140397759266816, 140397759270911,
+STORE, 140397759270912, 140397759275007,
+ERASE, 140397759238144, 140397759266815,
+STORE, 93968837025792, 93968837160959,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140721751044096, 140737488351231,
+SNULL, 140721751052287, 140737488351231,
+STORE, 140721751044096, 140721751052287,
+STORE, 140721750913024, 140721751052287,
+STORE, 94426051657728, 94426053881855,
+SNULL, 94426051768319, 94426053881855,
+STORE, 94426051657728, 94426051768319,
+STORE, 94426051768320, 94426053881855,
+ERASE, 94426051768320, 94426053881855,
+STORE, 94426053861376, 94426053873663,
+STORE, 94426053873664, 94426053881855,
+STORE, 140228456181760, 140228458434559,
+SNULL, 140228456325119, 140228458434559,
+STORE, 140228456181760, 140228456325119,
+STORE, 140228456325120, 140228458434559,
+ERASE, 140228456325120, 140228458434559,
+STORE, 140228458422272, 140228458430463,
+STORE, 140228458430464, 140228458434559,
+STORE, 140721751117824, 140721751121919,
+STORE, 140721751105536, 140721751117823,
+STORE, 140228458393600, 140228458422271,
+STORE, 140228458385408, 140228458393599,
+STORE, 140228452384768, 140228456181759,
+SNULL, 140228452384768, 140228454043647,
+STORE, 140228454043648, 140228456181759,
+STORE, 140228452384768, 140228454043647,
+SNULL, 140228456140799, 140228456181759,
+STORE, 140228454043648, 140228456140799,
+STORE, 140228456140800, 140228456181759,
+SNULL, 140228456140800, 140228456165375,
+STORE, 140228456165376, 140228456181759,
+STORE, 140228456140800, 140228456165375,
+ERASE, 140228456140800, 140228456165375,
+STORE, 140228456140800, 140228456165375,
+ERASE, 140228456165376, 140228456181759,
+STORE, 140228456165376, 140228456181759,
+SNULL, 140228456157183, 140228456165375,
+STORE, 140228456140800, 140228456157183,
+STORE, 140228456157184, 140228456165375,
+SNULL, 94426053869567, 94426053873663,
+STORE, 94426053861376, 94426053869567,
+STORE, 94426053869568, 94426053873663,
+SNULL, 140228458426367, 140228458430463,
+STORE, 140228458422272, 140228458426367,
+STORE, 140228458426368, 140228458430463,
+ERASE, 140228458393600, 140228458422271,
+STORE, 94426073681920, 94426073817087,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140732727623680, 140737488351231,
+SNULL, 140732727631871, 140737488351231,
+STORE, 140732727623680, 140732727631871,
+STORE, 140732727492608, 140732727631871,
+STORE, 94537485996032, 94537488220159,
+SNULL, 94537486106623, 94537488220159,
+STORE, 94537485996032, 94537486106623,
+STORE, 94537486106624, 94537488220159,
+ERASE, 94537486106624, 94537488220159,
+STORE, 94537488199680, 94537488211967,
+STORE, 94537488211968, 94537488220159,
+STORE, 140446578036736, 140446580289535,
+SNULL, 140446578180095, 140446580289535,
+STORE, 140446578036736, 140446578180095,
+STORE, 140446578180096, 140446580289535,
+ERASE, 140446578180096, 140446580289535,
+STORE, 140446580277248, 140446580285439,
+STORE, 140446580285440, 140446580289535,
+STORE, 140732727758848, 140732727762943,
+STORE, 140732727746560, 140732727758847,
+STORE, 140446580248576, 140446580277247,
+STORE, 140446580240384, 140446580248575,
+STORE, 140446574239744, 140446578036735,
+SNULL, 140446574239744, 140446575898623,
+STORE, 140446575898624, 140446578036735,
+STORE, 140446574239744, 140446575898623,
+SNULL, 140446577995775, 140446578036735,
+STORE, 140446575898624, 140446577995775,
+STORE, 140446577995776, 140446578036735,
+SNULL, 140446577995776, 140446578020351,
+STORE, 140446578020352, 140446578036735,
+STORE, 140446577995776, 140446578020351,
+ERASE, 140446577995776, 140446578020351,
+STORE, 140446577995776, 140446578020351,
+ERASE, 140446578020352, 140446578036735,
+STORE, 140446578020352, 140446578036735,
+SNULL, 140446578012159, 140446578020351,
+STORE, 140446577995776, 140446578012159,
+STORE, 140446578012160, 140446578020351,
+SNULL, 94537488207871, 94537488211967,
+STORE, 94537488199680, 94537488207871,
+STORE, 94537488207872, 94537488211967,
+SNULL, 140446580281343, 140446580285439,
+STORE, 140446580277248, 140446580281343,
+STORE, 140446580281344, 140446580285439,
+ERASE, 140446580248576, 140446580277247,
+STORE, 94537489014784, 94537489149951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728766808064, 140737488351231,
+SNULL, 140728766816255, 140737488351231,
+STORE, 140728766808064, 140728766816255,
+STORE, 140728766676992, 140728766816255,
+STORE, 94418513866752, 94418516090879,
+SNULL, 94418513977343, 94418516090879,
+STORE, 94418513866752, 94418513977343,
+STORE, 94418513977344, 94418516090879,
+ERASE, 94418513977344, 94418516090879,
+STORE, 94418516070400, 94418516082687,
+STORE, 94418516082688, 94418516090879,
+STORE, 140556479520768, 140556481773567,
+SNULL, 140556479664127, 140556481773567,
+STORE, 140556479520768, 140556479664127,
+STORE, 140556479664128, 140556481773567,
+ERASE, 140556479664128, 140556481773567,
+STORE, 140556481761280, 140556481769471,
+STORE, 140556481769472, 140556481773567,
+STORE, 140728767148032, 140728767152127,
+STORE, 140728767135744, 140728767148031,
+STORE, 140556481732608, 140556481761279,
+STORE, 140556481724416, 140556481732607,
+STORE, 140556475723776, 140556479520767,
+SNULL, 140556475723776, 140556477382655,
+STORE, 140556477382656, 140556479520767,
+STORE, 140556475723776, 140556477382655,
+SNULL, 140556479479807, 140556479520767,
+STORE, 140556477382656, 140556479479807,
+STORE, 140556479479808, 140556479520767,
+SNULL, 140556479479808, 140556479504383,
+STORE, 140556479504384, 140556479520767,
+STORE, 140556479479808, 140556479504383,
+ERASE, 140556479479808, 140556479504383,
+STORE, 140556479479808, 140556479504383,
+ERASE, 140556479504384, 140556479520767,
+STORE, 140556479504384, 140556479520767,
+SNULL, 140556479496191, 140556479504383,
+STORE, 140556479479808, 140556479496191,
+STORE, 140556479496192, 140556479504383,
+SNULL, 94418516078591, 94418516082687,
+STORE, 94418516070400, 94418516078591,
+STORE, 94418516078592, 94418516082687,
+SNULL, 140556481765375, 140556481769471,
+STORE, 140556481761280, 140556481765375,
+STORE, 140556481765376, 140556481769471,
+ERASE, 140556481732608, 140556481761279,
+STORE, 94418541113344, 94418541248511,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723945873408, 140737488351231,
+SNULL, 140723945881599, 140737488351231,
+STORE, 140723945873408, 140723945881599,
+STORE, 140723945742336, 140723945881599,
+STORE, 94543169773568, 94543171997695,
+SNULL, 94543169884159, 94543171997695,
+STORE, 94543169773568, 94543169884159,
+STORE, 94543169884160, 94543171997695,
+ERASE, 94543169884160, 94543171997695,
+STORE, 94543171977216, 94543171989503,
+STORE, 94543171989504, 94543171997695,
+STORE, 139890420883456, 139890423136255,
+SNULL, 139890421026815, 139890423136255,
+STORE, 139890420883456, 139890421026815,
+STORE, 139890421026816, 139890423136255,
+ERASE, 139890421026816, 139890423136255,
+STORE, 139890423123968, 139890423132159,
+STORE, 139890423132160, 139890423136255,
+STORE, 140723946102784, 140723946106879,
+STORE, 140723946090496, 140723946102783,
+STORE, 139890423095296, 139890423123967,
+STORE, 139890423087104, 139890423095295,
+STORE, 139890417086464, 139890420883455,
+SNULL, 139890417086464, 139890418745343,
+STORE, 139890418745344, 139890420883455,
+STORE, 139890417086464, 139890418745343,
+SNULL, 139890420842495, 139890420883455,
+STORE, 139890418745344, 139890420842495,
+STORE, 139890420842496, 139890420883455,
+SNULL, 139890420842496, 139890420867071,
+STORE, 139890420867072, 139890420883455,
+STORE, 139890420842496, 139890420867071,
+ERASE, 139890420842496, 139890420867071,
+STORE, 139890420842496, 139890420867071,
+ERASE, 139890420867072, 139890420883455,
+STORE, 139890420867072, 139890420883455,
+SNULL, 139890420858879, 139890420867071,
+STORE, 139890420842496, 139890420858879,
+STORE, 139890420858880, 139890420867071,
+SNULL, 94543171985407, 94543171989503,
+STORE, 94543171977216, 94543171985407,
+STORE, 94543171985408, 94543171989503,
+SNULL, 139890423128063, 139890423132159,
+STORE, 139890423123968, 139890423128063,
+STORE, 139890423128064, 139890423132159,
+ERASE, 139890423095296, 139890423123967,
+STORE, 94543197097984, 94543197233151,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140736205979648, 140737488351231,
+SNULL, 140736205987839, 140737488351231,
+STORE, 140736205979648, 140736205987839,
+STORE, 140736205848576, 140736205987839,
+STORE, 94913209913344, 94913212137471,
+SNULL, 94913210023935, 94913212137471,
+STORE, 94913209913344, 94913210023935,
+STORE, 94913210023936, 94913212137471,
+ERASE, 94913210023936, 94913212137471,
+STORE, 94913212116992, 94913212129279,
+STORE, 94913212129280, 94913212137471,
+STORE, 140006323052544, 140006325305343,
+SNULL, 140006323195903, 140006325305343,
+STORE, 140006323052544, 140006323195903,
+STORE, 140006323195904, 140006325305343,
+ERASE, 140006323195904, 140006325305343,
+STORE, 140006325293056, 140006325301247,
+STORE, 140006325301248, 140006325305343,
+STORE, 140736206716928, 140736206721023,
+STORE, 140736206704640, 140736206716927,
+STORE, 140006325264384, 140006325293055,
+STORE, 140006325256192, 140006325264383,
+STORE, 140006319255552, 140006323052543,
+SNULL, 140006319255552, 140006320914431,
+STORE, 140006320914432, 140006323052543,
+STORE, 140006319255552, 140006320914431,
+SNULL, 140006323011583, 140006323052543,
+STORE, 140006320914432, 140006323011583,
+STORE, 140006323011584, 140006323052543,
+SNULL, 140006323011584, 140006323036159,
+STORE, 140006323036160, 140006323052543,
+STORE, 140006323011584, 140006323036159,
+ERASE, 140006323011584, 140006323036159,
+STORE, 140006323011584, 140006323036159,
+ERASE, 140006323036160, 140006323052543,
+STORE, 140006323036160, 140006323052543,
+SNULL, 140006323027967, 140006323036159,
+STORE, 140006323011584, 140006323027967,
+STORE, 140006323027968, 140006323036159,
+SNULL, 94913212125183, 94913212129279,
+STORE, 94913212116992, 94913212125183,
+STORE, 94913212125184, 94913212129279,
+SNULL, 140006325297151, 140006325301247,
+STORE, 140006325293056, 140006325297151,
+STORE, 140006325297152, 140006325301247,
+ERASE, 140006325264384, 140006325293055,
+STORE, 94913239932928, 94913240068095,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140726926897152, 140737488351231,
+SNULL, 140726926905343, 140737488351231,
+STORE, 140726926897152, 140726926905343,
+STORE, 140726926766080, 140726926905343,
+STORE, 94213246820352, 94213249044479,
+SNULL, 94213246930943, 94213249044479,
+STORE, 94213246820352, 94213246930943,
+STORE, 94213246930944, 94213249044479,
+ERASE, 94213246930944, 94213249044479,
+STORE, 94213249024000, 94213249036287,
+STORE, 94213249036288, 94213249044479,
+STORE, 140368830242816, 140368832495615,
+SNULL, 140368830386175, 140368832495615,
+STORE, 140368830242816, 140368830386175,
+STORE, 140368830386176, 140368832495615,
+ERASE, 140368830386176, 140368832495615,
+STORE, 140368832483328, 140368832491519,
+STORE, 140368832491520, 140368832495615,
+STORE, 140726926999552, 140726927003647,
+STORE, 140726926987264, 140726926999551,
+STORE, 140368832454656, 140368832483327,
+STORE, 140368832446464, 140368832454655,
+STORE, 140368826445824, 140368830242815,
+SNULL, 140368826445824, 140368828104703,
+STORE, 140368828104704, 140368830242815,
+STORE, 140368826445824, 140368828104703,
+SNULL, 140368830201855, 140368830242815,
+STORE, 140368828104704, 140368830201855,
+STORE, 140368830201856, 140368830242815,
+SNULL, 140368830201856, 140368830226431,
+STORE, 140368830226432, 140368830242815,
+STORE, 140368830201856, 140368830226431,
+ERASE, 140368830201856, 140368830226431,
+STORE, 140368830201856, 140368830226431,
+ERASE, 140368830226432, 140368830242815,
+STORE, 140368830226432, 140368830242815,
+SNULL, 140368830218239, 140368830226431,
+STORE, 140368830201856, 140368830218239,
+STORE, 140368830218240, 140368830226431,
+SNULL, 94213249032191, 94213249036287,
+STORE, 94213249024000, 94213249032191,
+STORE, 94213249032192, 94213249036287,
+SNULL, 140368832487423, 140368832491519,
+STORE, 140368832483328, 140368832487423,
+STORE, 140368832487424, 140368832491519,
+ERASE, 140368832454656, 140368832483327,
+STORE, 94213267435520, 94213267570687,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728954130432, 140737488351231,
+SNULL, 140728954138623, 140737488351231,
+STORE, 140728954130432, 140728954138623,
+STORE, 140728953999360, 140728954138623,
+STORE, 94672570966016, 94672573190143,
+SNULL, 94672571076607, 94672573190143,
+STORE, 94672570966016, 94672571076607,
+STORE, 94672571076608, 94672573190143,
+ERASE, 94672571076608, 94672573190143,
+STORE, 94672573169664, 94672573181951,
+STORE, 94672573181952, 94672573190143,
+STORE, 140201696735232, 140201698988031,
+SNULL, 140201696878591, 140201698988031,
+STORE, 140201696735232, 140201696878591,
+STORE, 140201696878592, 140201698988031,
+ERASE, 140201696878592, 140201698988031,
+STORE, 140201698975744, 140201698983935,
+STORE, 140201698983936, 140201698988031,
+STORE, 140728954163200, 140728954167295,
+STORE, 140728954150912, 140728954163199,
+STORE, 140201698947072, 140201698975743,
+STORE, 140201698938880, 140201698947071,
+STORE, 140201692938240, 140201696735231,
+SNULL, 140201692938240, 140201694597119,
+STORE, 140201694597120, 140201696735231,
+STORE, 140201692938240, 140201694597119,
+SNULL, 140201696694271, 140201696735231,
+STORE, 140201694597120, 140201696694271,
+STORE, 140201696694272, 140201696735231,
+SNULL, 140201696694272, 140201696718847,
+STORE, 140201696718848, 140201696735231,
+STORE, 140201696694272, 140201696718847,
+ERASE, 140201696694272, 140201696718847,
+STORE, 140201696694272, 140201696718847,
+ERASE, 140201696718848, 140201696735231,
+STORE, 140201696718848, 140201696735231,
+SNULL, 140201696710655, 140201696718847,
+STORE, 140201696694272, 140201696710655,
+STORE, 140201696710656, 140201696718847,
+SNULL, 94672573177855, 94672573181951,
+STORE, 94672573169664, 94672573177855,
+STORE, 94672573177856, 94672573181951,
+SNULL, 140201698979839, 140201698983935,
+STORE, 140201698975744, 140201698979839,
+STORE, 140201698979840, 140201698983935,
+ERASE, 140201698947072, 140201698975743,
+STORE, 94672595689472, 94672595824639,
+STORE, 94114394132480, 94114394345471,
+STORE, 94114396442624, 94114396446719,
+STORE, 94114396446720, 94114396454911,
+STORE, 94114396454912, 94114396467199,
+STORE, 94114421575680, 94114428256255,
+STORE, 139934313955328, 139934315614207,
+STORE, 139934315614208, 139934317711359,
+STORE, 139934317711360, 139934317727743,
+STORE, 139934317727744, 139934317735935,
+STORE, 139934317735936, 139934317752319,
+STORE, 139934317752320, 139934317764607,
+STORE, 139934317764608, 139934319857663,
+STORE, 139934319857664, 139934319861759,
+STORE, 139934319861760, 139934319865855,
+STORE, 139934319865856, 139934320009215,
+STORE, 139934320377856, 139934322061311,
+STORE, 139934322061312, 139934322077695,
+STORE, 139934322106368, 139934322110463,
+STORE, 139934322110464, 139934322114559,
+STORE, 139934322114560, 139934322118655,
+STORE, 140731200376832, 140731200516095,
+STORE, 140731200929792, 140731200942079,
+STORE, 140731200942080, 140731200946175,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140721532362752, 140737488351231,
+SNULL, 140721532370943, 140737488351231,
+STORE, 140721532362752, 140721532370943,
+STORE, 140721532231680, 140721532370943,
+STORE, 94467222597632, 94467224821759,
+SNULL, 94467222708223, 94467224821759,
+STORE, 94467222597632, 94467222708223,
+STORE, 94467222708224, 94467224821759,
+ERASE, 94467222708224, 94467224821759,
+STORE, 94467224801280, 94467224813567,
+STORE, 94467224813568, 94467224821759,
+STORE, 140191433543680, 140191435796479,
+SNULL, 140191433687039, 140191435796479,
+STORE, 140191433543680, 140191433687039,
+STORE, 140191433687040, 140191435796479,
+ERASE, 140191433687040, 140191435796479,
+STORE, 140191435784192, 140191435792383,
+STORE, 140191435792384, 140191435796479,
+STORE, 140721533034496, 140721533038591,
+STORE, 140721533022208, 140721533034495,
+STORE, 140191435755520, 140191435784191,
+STORE, 140191435747328, 140191435755519,
+STORE, 140191429746688, 140191433543679,
+SNULL, 140191429746688, 140191431405567,
+STORE, 140191431405568, 140191433543679,
+STORE, 140191429746688, 140191431405567,
+SNULL, 140191433502719, 140191433543679,
+STORE, 140191431405568, 140191433502719,
+STORE, 140191433502720, 140191433543679,
+SNULL, 140191433502720, 140191433527295,
+STORE, 140191433527296, 140191433543679,
+STORE, 140191433502720, 140191433527295,
+ERASE, 140191433502720, 140191433527295,
+STORE, 140191433502720, 140191433527295,
+ERASE, 140191433527296, 140191433543679,
+STORE, 140191433527296, 140191433543679,
+SNULL, 140191433519103, 140191433527295,
+STORE, 140191433502720, 140191433519103,
+STORE, 140191433519104, 140191433527295,
+SNULL, 94467224809471, 94467224813567,
+STORE, 94467224801280, 94467224809471,
+STORE, 94467224809472, 94467224813567,
+SNULL, 140191435788287, 140191435792383,
+STORE, 140191435784192, 140191435788287,
+STORE, 140191435788288, 140191435792383,
+ERASE, 140191435755520, 140191435784191,
+STORE, 94467251847168, 94467251982335,
+STORE, 94367895400448, 94367895613439,
+STORE, 94367897710592, 94367897714687,
+STORE, 94367897714688, 94367897722879,
+STORE, 94367897722880, 94367897735167,
+STORE, 94367925264384, 94367926861823,
+STORE, 139801317548032, 139801319206911,
+STORE, 139801319206912, 139801321304063,
+STORE, 139801321304064, 139801321320447,
+STORE, 139801321320448, 139801321328639,
+STORE, 139801321328640, 139801321345023,
+STORE, 139801321345024, 139801321357311,
+STORE, 139801321357312, 139801323450367,
+STORE, 139801323450368, 139801323454463,
+STORE, 139801323454464, 139801323458559,
+STORE, 139801323458560, 139801323601919,
+STORE, 139801323970560, 139801325654015,
+STORE, 139801325654016, 139801325670399,
+STORE, 139801325699072, 139801325703167,
+STORE, 139801325703168, 139801325707263,
+STORE, 139801325707264, 139801325711359,
+STORE, 140724442861568, 140724443000831,
+STORE, 140724443611136, 140724443623423,
+STORE, 140724443623424, 140724443627519,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731353149440, 140737488351231,
+SNULL, 140731353157631, 140737488351231,
+STORE, 140731353149440, 140731353157631,
+STORE, 140731353018368, 140731353157631,
+STORE, 94310379503616, 94310381838335,
+SNULL, 94310379716607, 94310381838335,
+STORE, 94310379503616, 94310379716607,
+STORE, 94310379716608, 94310381838335,
+ERASE, 94310379716608, 94310381838335,
+STORE, 94310381813760, 94310381826047,
+STORE, 94310381826048, 94310381838335,
+STORE, 140515434659840, 140515436912639,
+SNULL, 140515434803199, 140515436912639,
+STORE, 140515434659840, 140515434803199,
+STORE, 140515434803200, 140515436912639,
+ERASE, 140515434803200, 140515436912639,
+STORE, 140515436900352, 140515436908543,
+STORE, 140515436908544, 140515436912639,
+STORE, 140731353886720, 140731353890815,
+STORE, 140731353874432, 140731353886719,
+STORE, 140515436871680, 140515436900351,
+STORE, 140515436863488, 140515436871679,
+STORE, 140515432546304, 140515434659839,
+SNULL, 140515432546304, 140515432558591,
+STORE, 140515432558592, 140515434659839,
+STORE, 140515432546304, 140515432558591,
+SNULL, 140515434651647, 140515434659839,
+STORE, 140515432558592, 140515434651647,
+STORE, 140515434651648, 140515434659839,
+ERASE, 140515434651648, 140515434659839,
+STORE, 140515434651648, 140515434659839,
+STORE, 140515428749312, 140515432546303,
+SNULL, 140515428749312, 140515430408191,
+STORE, 140515430408192, 140515432546303,
+STORE, 140515428749312, 140515430408191,
+SNULL, 140515432505343, 140515432546303,
+STORE, 140515430408192, 140515432505343,
+STORE, 140515432505344, 140515432546303,
+SNULL, 140515432505344, 140515432529919,
+STORE, 140515432529920, 140515432546303,
+STORE, 140515432505344, 140515432529919,
+ERASE, 140515432505344, 140515432529919,
+STORE, 140515432505344, 140515432529919,
+ERASE, 140515432529920, 140515432546303,
+STORE, 140515432529920, 140515432546303,
+STORE, 140515436855296, 140515436871679,
+SNULL, 140515432521727, 140515432529919,
+STORE, 140515432505344, 140515432521727,
+STORE, 140515432521728, 140515432529919,
+SNULL, 140515434655743, 140515434659839,
+STORE, 140515434651648, 140515434655743,
+STORE, 140515434655744, 140515434659839,
+SNULL, 94310381817855, 94310381826047,
+STORE, 94310381813760, 94310381817855,
+STORE, 94310381817856, 94310381826047,
+SNULL, 140515436904447, 140515436908543,
+STORE, 140515436900352, 140515436904447,
+STORE, 140515436904448, 140515436908543,
+ERASE, 140515436871680, 140515436900351,
+STORE, 94310395457536, 94310395592703,
+STORE, 140515435171840, 140515436855295,
+STORE, 94310395457536, 94310395727871,
+STORE, 94310395457536, 94310395863039,
+STORE, 94310395457536, 94310396047359,
+SNULL, 94310396022783, 94310396047359,
+STORE, 94310395457536, 94310396022783,
+STORE, 94310396022784, 94310396047359,
+ERASE, 94310396022784, 94310396047359,
+STORE, 94310395457536, 94310396157951,
+STORE, 94310395457536, 94310396293119,
+SNULL, 94310396276735, 94310396293119,
+STORE, 94310395457536, 94310396276735,
+STORE, 94310396276736, 94310396293119,
+ERASE, 94310396276736, 94310396293119,
+STORE, 94310395457536, 94310396411903,
+SNULL, 94310396383231, 94310396411903,
+STORE, 94310395457536, 94310396383231,
+STORE, 94310396383232, 94310396411903,
+ERASE, 94310396383232, 94310396411903,
+STORE, 94310395457536, 94310396522495,
+STORE, 94310395457536, 94310396674047,
+SNULL, 94310396657663, 94310396674047,
+STORE, 94310395457536, 94310396657663,
+STORE, 94310396657664, 94310396674047,
+ERASE, 94310396657664, 94310396674047,
+SNULL, 94310396624895, 94310396657663,
+STORE, 94310395457536, 94310396624895,
+STORE, 94310396624896, 94310396657663,
+ERASE, 94310396624896, 94310396657663,
+STORE, 94310395457536, 94310396776447,
+SNULL, 94310396764159, 94310396776447,
+STORE, 94310395457536, 94310396764159,
+STORE, 94310396764160, 94310396776447,
+ERASE, 94310396764160, 94310396776447,
+SNULL, 94310396739583, 94310396764159,
+STORE, 94310395457536, 94310396739583,
+STORE, 94310396739584, 94310396764159,
+ERASE, 94310396739584, 94310396764159,
+STORE, 94310395457536, 94310396882943,
+STORE, 94310395457536, 94310397018111,
+STORE, 94310395457536, 94310397161471,
+STORE, 94310395457536, 94310397300735,
+SNULL, 94310397292543, 94310397300735,
+STORE, 94310395457536, 94310397292543,
+STORE, 94310397292544, 94310397300735,
+ERASE, 94310397292544, 94310397300735,
+STORE, 94359222210560, 94359222423551,
+STORE, 94359224520704, 94359224524799,
+STORE, 94359224524800, 94359224532991,
+STORE, 94359224532992, 94359224545279,
+STORE, 94359238348800, 94359239385087,
+STORE, 140675699838976, 140675701497855,
+STORE, 140675701497856, 140675703595007,
+STORE, 140675703595008, 140675703611391,
+STORE, 140675703611392, 140675703619583,
+STORE, 140675703619584, 140675703635967,
+STORE, 140675703635968, 140675703648255,
+STORE, 140675703648256, 140675705741311,
+STORE, 140675705741312, 140675705745407,
+STORE, 140675705745408, 140675705749503,
+STORE, 140675705749504, 140675705892863,
+STORE, 140675706261504, 140675707944959,
+STORE, 140675707944960, 140675707961343,
+STORE, 140675707990016, 140675707994111,
+STORE, 140675707994112, 140675707998207,
+STORE, 140675707998208, 140675708002303,
+STORE, 140721324634112, 140721324773375,
+STORE, 140721324810240, 140721324822527,
+STORE, 140721324822528, 140721324826623,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724099678208, 140737488351231,
+SNULL, 140724099686399, 140737488351231,
+STORE, 140724099678208, 140724099686399,
+STORE, 140724099547136, 140724099686399,
+STORE, 94586638516224, 94586640850943,
+SNULL, 94586638729215, 94586640850943,
+STORE, 94586638516224, 94586638729215,
+STORE, 94586638729216, 94586640850943,
+ERASE, 94586638729216, 94586640850943,
+STORE, 94586640826368, 94586640838655,
+STORE, 94586640838656, 94586640850943,
+STORE, 140371033796608, 140371036049407,
+SNULL, 140371033939967, 140371036049407,
+STORE, 140371033796608, 140371033939967,
+STORE, 140371033939968, 140371036049407,
+ERASE, 140371033939968, 140371036049407,
+STORE, 140371036037120, 140371036045311,
+STORE, 140371036045312, 140371036049407,
+STORE, 140724100001792, 140724100005887,
+STORE, 140724099989504, 140724100001791,
+STORE, 140371036008448, 140371036037119,
+STORE, 140371036000256, 140371036008447,
+STORE, 140371031683072, 140371033796607,
+SNULL, 140371031683072, 140371031695359,
+STORE, 140371031695360, 140371033796607,
+STORE, 140371031683072, 140371031695359,
+SNULL, 140371033788415, 140371033796607,
+STORE, 140371031695360, 140371033788415,
+STORE, 140371033788416, 140371033796607,
+ERASE, 140371033788416, 140371033796607,
+STORE, 140371033788416, 140371033796607,
+STORE, 140371027886080, 140371031683071,
+SNULL, 140371027886080, 140371029544959,
+STORE, 140371029544960, 140371031683071,
+STORE, 140371027886080, 140371029544959,
+SNULL, 140371031642111, 140371031683071,
+STORE, 140371029544960, 140371031642111,
+STORE, 140371031642112, 140371031683071,
+SNULL, 140371031642112, 140371031666687,
+STORE, 140371031666688, 140371031683071,
+STORE, 140371031642112, 140371031666687,
+ERASE, 140371031642112, 140371031666687,
+STORE, 140371031642112, 140371031666687,
+ERASE, 140371031666688, 140371031683071,
+STORE, 140371031666688, 140371031683071,
+STORE, 140371035992064, 140371036008447,
+SNULL, 140371031658495, 140371031666687,
+STORE, 140371031642112, 140371031658495,
+STORE, 140371031658496, 140371031666687,
+SNULL, 140371033792511, 140371033796607,
+STORE, 140371033788416, 140371033792511,
+STORE, 140371033792512, 140371033796607,
+SNULL, 94586640830463, 94586640838655,
+STORE, 94586640826368, 94586640830463,
+STORE, 94586640830464, 94586640838655,
+SNULL, 140371036041215, 140371036045311,
+STORE, 140371036037120, 140371036041215,
+STORE, 140371036041216, 140371036045311,
+ERASE, 140371036008448, 140371036037119,
+STORE, 94586663849984, 94586663985151,
+STORE, 140371034308608, 140371035992063,
+STORE, 94586663849984, 94586664120319,
+STORE, 94586663849984, 94586664255487,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140727532937216, 140737488351231,
+SNULL, 140727532945407, 140737488351231,
+STORE, 140727532937216, 140727532945407,
+STORE, 140727532806144, 140727532945407,
+STORE, 94849780191232, 94849782525951,
+SNULL, 94849780404223, 94849782525951,
+STORE, 94849780191232, 94849780404223,
+STORE, 94849780404224, 94849782525951,
+ERASE, 94849780404224, 94849782525951,
+STORE, 94849782501376, 94849782513663,
+STORE, 94849782513664, 94849782525951,
+STORE, 140382070218752, 140382072471551,
+SNULL, 140382070362111, 140382072471551,
+STORE, 140382070218752, 140382070362111,
+STORE, 140382070362112, 140382072471551,
+ERASE, 140382070362112, 140382072471551,
+STORE, 140382072459264, 140382072467455,
+STORE, 140382072467456, 140382072471551,
+STORE, 140727533092864, 140727533096959,
+STORE, 140727533080576, 140727533092863,
+STORE, 140382072430592, 140382072459263,
+STORE, 140382072422400, 140382072430591,
+STORE, 140382068105216, 140382070218751,
+SNULL, 140382068105216, 140382068117503,
+STORE, 140382068117504, 140382070218751,
+STORE, 140382068105216, 140382068117503,
+SNULL, 140382070210559, 140382070218751,
+STORE, 140382068117504, 140382070210559,
+STORE, 140382070210560, 140382070218751,
+ERASE, 140382070210560, 140382070218751,
+STORE, 140382070210560, 140382070218751,
+STORE, 140382064308224, 140382068105215,
+SNULL, 140382064308224, 140382065967103,
+STORE, 140382065967104, 140382068105215,
+STORE, 140382064308224, 140382065967103,
+SNULL, 140382068064255, 140382068105215,
+STORE, 140382065967104, 140382068064255,
+STORE, 140382068064256, 140382068105215,
+SNULL, 140382068064256, 140382068088831,
+STORE, 140382068088832, 140382068105215,
+STORE, 140382068064256, 140382068088831,
+ERASE, 140382068064256, 140382068088831,
+STORE, 140382068064256, 140382068088831,
+ERASE, 140382068088832, 140382068105215,
+STORE, 140382068088832, 140382068105215,
+STORE, 140382072414208, 140382072430591,
+SNULL, 140382068080639, 140382068088831,
+STORE, 140382068064256, 140382068080639,
+STORE, 140382068080640, 140382068088831,
+SNULL, 140382070214655, 140382070218751,
+STORE, 140382070210560, 140382070214655,
+STORE, 140382070214656, 140382070218751,
+SNULL, 94849782505471, 94849782513663,
+STORE, 94849782501376, 94849782505471,
+STORE, 94849782505472, 94849782513663,
+SNULL, 140382072463359, 140382072467455,
+STORE, 140382072459264, 140382072463359,
+STORE, 140382072463360, 140382072467455,
+ERASE, 140382072430592, 140382072459263,
+STORE, 94849782845440, 94849782980607,
+STORE, 140382070730752, 140382072414207,
+STORE, 94849782845440, 94849783115775,
+STORE, 94849782845440, 94849783250943,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722594377728, 140737488351231,
+SNULL, 140722594385919, 140737488351231,
+STORE, 140722594377728, 140722594385919,
+STORE, 140722594246656, 140722594385919,
+STORE, 94421466353664, 94421468577791,
+SNULL, 94421466464255, 94421468577791,
+STORE, 94421466353664, 94421466464255,
+STORE, 94421466464256, 94421468577791,
+ERASE, 94421466464256, 94421468577791,
+STORE, 94421468557312, 94421468569599,
+STORE, 94421468569600, 94421468577791,
+STORE, 140345458057216, 140345460310015,
+SNULL, 140345458200575, 140345460310015,
+STORE, 140345458057216, 140345458200575,
+STORE, 140345458200576, 140345460310015,
+ERASE, 140345458200576, 140345460310015,
+STORE, 140345460297728, 140345460305919,
+STORE, 140345460305920, 140345460310015,
+STORE, 140722595557376, 140722595561471,
+STORE, 140722595545088, 140722595557375,
+STORE, 140345460269056, 140345460297727,
+STORE, 140345460260864, 140345460269055,
+STORE, 140345454260224, 140345458057215,
+SNULL, 140345454260224, 140345455919103,
+STORE, 140345455919104, 140345458057215,
+STORE, 140345454260224, 140345455919103,
+SNULL, 140345458016255, 140345458057215,
+STORE, 140345455919104, 140345458016255,
+STORE, 140345458016256, 140345458057215,
+SNULL, 140345458016256, 140345458040831,
+STORE, 140345458040832, 140345458057215,
+STORE, 140345458016256, 140345458040831,
+ERASE, 140345458016256, 140345458040831,
+STORE, 140345458016256, 140345458040831,
+ERASE, 140345458040832, 140345458057215,
+STORE, 140345458040832, 140345458057215,
+SNULL, 140345458032639, 140345458040831,
+STORE, 140345458016256, 140345458032639,
+STORE, 140345458032640, 140345458040831,
+SNULL, 94421468565503, 94421468569599,
+STORE, 94421468557312, 94421468565503,
+STORE, 94421468565504, 94421468569599,
+SNULL, 140345460301823, 140345460305919,
+STORE, 140345460297728, 140345460301823,
+STORE, 140345460301824, 140345460305919,
+ERASE, 140345460269056, 140345460297727,
+STORE, 94421496004608, 94421496139775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140726096302080, 140737488351231,
+SNULL, 140726096310271, 140737488351231,
+STORE, 140726096302080, 140726096310271,
+STORE, 140726096171008, 140726096310271,
+STORE, 94101992124416, 94101994459135,
+SNULL, 94101992337407, 94101994459135,
+STORE, 94101992124416, 94101992337407,
+STORE, 94101992337408, 94101994459135,
+ERASE, 94101992337408, 94101994459135,
+STORE, 94101994434560, 94101994446847,
+STORE, 94101994446848, 94101994459135,
+STORE, 140192085594112, 140192087846911,
+SNULL, 140192085737471, 140192087846911,
+STORE, 140192085594112, 140192085737471,
+STORE, 140192085737472, 140192087846911,
+ERASE, 140192085737472, 140192087846911,
+STORE, 140192087834624, 140192087842815,
+STORE, 140192087842816, 140192087846911,
+STORE, 140726096375808, 140726096379903,
+STORE, 140726096363520, 140726096375807,
+STORE, 140192087805952, 140192087834623,
+STORE, 140192087797760, 140192087805951,
+STORE, 140192083480576, 140192085594111,
+SNULL, 140192083480576, 140192083492863,
+STORE, 140192083492864, 140192085594111,
+STORE, 140192083480576, 140192083492863,
+SNULL, 140192085585919, 140192085594111,
+STORE, 140192083492864, 140192085585919,
+STORE, 140192085585920, 140192085594111,
+ERASE, 140192085585920, 140192085594111,
+STORE, 140192085585920, 140192085594111,
+STORE, 140192079683584, 140192083480575,
+SNULL, 140192079683584, 140192081342463,
+STORE, 140192081342464, 140192083480575,
+STORE, 140192079683584, 140192081342463,
+SNULL, 140192083439615, 140192083480575,
+STORE, 140192081342464, 140192083439615,
+STORE, 140192083439616, 140192083480575,
+SNULL, 140192083439616, 140192083464191,
+STORE, 140192083464192, 140192083480575,
+STORE, 140192083439616, 140192083464191,
+ERASE, 140192083439616, 140192083464191,
+STORE, 140192083439616, 140192083464191,
+ERASE, 140192083464192, 140192083480575,
+STORE, 140192083464192, 140192083480575,
+STORE, 140192087789568, 140192087805951,
+SNULL, 140192083455999, 140192083464191,
+STORE, 140192083439616, 140192083455999,
+STORE, 140192083456000, 140192083464191,
+SNULL, 140192085590015, 140192085594111,
+STORE, 140192085585920, 140192085590015,
+STORE, 140192085590016, 140192085594111,
+SNULL, 94101994438655, 94101994446847,
+STORE, 94101994434560, 94101994438655,
+STORE, 94101994438656, 94101994446847,
+SNULL, 140192087838719, 140192087842815,
+STORE, 140192087834624, 140192087838719,
+STORE, 140192087838720, 140192087842815,
+ERASE, 140192087805952, 140192087834623,
+STORE, 94102011887616, 94102012022783,
+STORE, 140192086106112, 140192087789567,
+STORE, 94102011887616, 94102012157951,
+STORE, 94102011887616, 94102012293119,
+STORE, 94102011887616, 94102012440575,
+SNULL, 94102012428287, 94102012440575,
+STORE, 94102011887616, 94102012428287,
+STORE, 94102012428288, 94102012440575,
+ERASE, 94102012428288, 94102012440575,
+STORE, 94102011887616, 94102012579839,
+STORE, 94102011887616, 94102012715007,
+SNULL, 94102012694527, 94102012715007,
+STORE, 94102011887616, 94102012694527,
+STORE, 94102012694528, 94102012715007,
+ERASE, 94102012694528, 94102012715007,
+STORE, 94102011887616, 94102012833791,
+STORE, 94102011887616, 94102012968959,
+SNULL, 94102012927999, 94102012968959,
+STORE, 94102011887616, 94102012927999,
+STORE, 94102012928000, 94102012968959,
+ERASE, 94102012928000, 94102012968959,
+STORE, 94102011887616, 94102013091839,
+SNULL, 94102013075455, 94102013091839,
+STORE, 94102011887616, 94102013075455,
+STORE, 94102013075456, 94102013091839,
+ERASE, 94102013075456, 94102013091839,
+STORE, 94102011887616, 94102013210623,
+STORE, 94102011887616, 94102013345791,
+STORE, 93968727965696, 93968728178687,
+STORE, 93968730275840, 93968730279935,
+STORE, 93968730279936, 93968730288127,
+STORE, 93968730288128, 93968730300415,
+STORE, 93968731140096, 93968732704767,
+STORE, 140588443168768, 140588444827647,
+STORE, 140588444827648, 140588446924799,
+STORE, 140588446924800, 140588446941183,
+STORE, 140588446941184, 140588446949375,
+STORE, 140588446949376, 140588446965759,
+STORE, 140588446965760, 140588446978047,
+STORE, 140588446978048, 140588449071103,
+STORE, 140588449071104, 140588449075199,
+STORE, 140588449075200, 140588449079295,
+STORE, 140588449079296, 140588449222655,
+STORE, 140588449591296, 140588451274751,
+STORE, 140588451274752, 140588451291135,
+STORE, 140588451319808, 140588451323903,
+STORE, 140588451323904, 140588451327999,
+STORE, 140588451328000, 140588451332095,
+STORE, 140733877239808, 140733877379071,
+STORE, 140733878702080, 140733878714367,
+STORE, 140733878714368, 140733878718463,
+STORE, 93968727965696, 93968728178687,
+STORE, 93968730275840, 93968730279935,
+STORE, 93968730279936, 93968730288127,
+STORE, 93968730288128, 93968730300415,
+STORE, 93968731140096, 93968732991487,
+STORE, 140588443168768, 140588444827647,
+STORE, 140588444827648, 140588446924799,
+STORE, 140588446924800, 140588446941183,
+STORE, 140588446941184, 140588446949375,
+STORE, 140588446949376, 140588446965759,
+STORE, 140588446965760, 140588446978047,
+STORE, 140588446978048, 140588449071103,
+STORE, 140588449071104, 140588449075199,
+STORE, 140588449075200, 140588449079295,
+STORE, 140588449079296, 140588449222655,
+STORE, 140588449591296, 140588451274751,
+STORE, 140588451274752, 140588451291135,
+STORE, 140588451319808, 140588451323903,
+STORE, 140588451323904, 140588451327999,
+STORE, 140588451328000, 140588451332095,
+STORE, 140733877239808, 140733877379071,
+STORE, 140733878702080, 140733878714367,
+STORE, 140733878714368, 140733878718463,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733054472192, 140737488351231,
+SNULL, 140733054480383, 140737488351231,
+STORE, 140733054472192, 140733054480383,
+STORE, 140733054341120, 140733054480383,
+STORE, 93992873623552, 93992875847679,
+SNULL, 93992873734143, 93992875847679,
+STORE, 93992873623552, 93992873734143,
+STORE, 93992873734144, 93992875847679,
+ERASE, 93992873734144, 93992875847679,
+STORE, 93992875827200, 93992875839487,
+STORE, 93992875839488, 93992875847679,
+STORE, 139790881488896, 139790883741695,
+SNULL, 139790881632255, 139790883741695,
+STORE, 139790881488896, 139790881632255,
+STORE, 139790881632256, 139790883741695,
+ERASE, 139790881632256, 139790883741695,
+STORE, 139790883729408, 139790883737599,
+STORE, 139790883737600, 139790883741695,
+STORE, 140733054754816, 140733054758911,
+STORE, 140733054742528, 140733054754815,
+STORE, 139790883700736, 139790883729407,
+STORE, 139790883692544, 139790883700735,
+STORE, 139790877691904, 139790881488895,
+SNULL, 139790877691904, 139790879350783,
+STORE, 139790879350784, 139790881488895,
+STORE, 139790877691904, 139790879350783,
+SNULL, 139790881447935, 139790881488895,
+STORE, 139790879350784, 139790881447935,
+STORE, 139790881447936, 139790881488895,
+SNULL, 139790881447936, 139790881472511,
+STORE, 139790881472512, 139790881488895,
+STORE, 139790881447936, 139790881472511,
+ERASE, 139790881447936, 139790881472511,
+STORE, 139790881447936, 139790881472511,
+ERASE, 139790881472512, 139790881488895,
+STORE, 139790881472512, 139790881488895,
+SNULL, 139790881464319, 139790881472511,
+STORE, 139790881447936, 139790881464319,
+STORE, 139790881464320, 139790881472511,
+SNULL, 93992875835391, 93992875839487,
+STORE, 93992875827200, 93992875835391,
+STORE, 93992875835392, 93992875839487,
+SNULL, 139790883733503, 139790883737599,
+STORE, 139790883729408, 139790883733503,
+STORE, 139790883733504, 139790883737599,
+ERASE, 139790883700736, 139790883729407,
+STORE, 93992877031424, 93992877166591,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728550887424, 140737488351231,
+SNULL, 140728550895615, 140737488351231,
+STORE, 140728550887424, 140728550895615,
+STORE, 140728550756352, 140728550895615,
+STORE, 94707634077696, 94707636301823,
+SNULL, 94707634188287, 94707636301823,
+STORE, 94707634077696, 94707634188287,
+STORE, 94707634188288, 94707636301823,
+ERASE, 94707634188288, 94707636301823,
+STORE, 94707636281344, 94707636293631,
+STORE, 94707636293632, 94707636301823,
+STORE, 140553545666560, 140553547919359,
+SNULL, 140553545809919, 140553547919359,
+STORE, 140553545666560, 140553545809919,
+STORE, 140553545809920, 140553547919359,
+ERASE, 140553545809920, 140553547919359,
+STORE, 140553547907072, 140553547915263,
+STORE, 140553547915264, 140553547919359,
+STORE, 140728552374272, 140728552378367,
+STORE, 140728552361984, 140728552374271,
+STORE, 140553547878400, 140553547907071,
+STORE, 140553547870208, 140553547878399,
+STORE, 140553541869568, 140553545666559,
+SNULL, 140553541869568, 140553543528447,
+STORE, 140553543528448, 140553545666559,
+STORE, 140553541869568, 140553543528447,
+SNULL, 140553545625599, 140553545666559,
+STORE, 140553543528448, 140553545625599,
+STORE, 140553545625600, 140553545666559,
+SNULL, 140553545625600, 140553545650175,
+STORE, 140553545650176, 140553545666559,
+STORE, 140553545625600, 140553545650175,
+ERASE, 140553545625600, 140553545650175,
+STORE, 140553545625600, 140553545650175,
+ERASE, 140553545650176, 140553545666559,
+STORE, 140553545650176, 140553545666559,
+SNULL, 140553545641983, 140553545650175,
+STORE, 140553545625600, 140553545641983,
+STORE, 140553545641984, 140553545650175,
+SNULL, 94707636289535, 94707636293631,
+STORE, 94707636281344, 94707636289535,
+STORE, 94707636289536, 94707636293631,
+SNULL, 140553547911167, 140553547915263,
+STORE, 140553547907072, 140553547911167,
+STORE, 140553547911168, 140553547915263,
+ERASE, 140553547878400, 140553547907071,
+STORE, 94707651411968, 94707651547135,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140732168695808, 140737488351231,
+SNULL, 140732168703999, 140737488351231,
+STORE, 140732168695808, 140732168703999,
+STORE, 140732168564736, 140732168703999,
+STORE, 94454287859712, 94454290083839,
+SNULL, 94454287970303, 94454290083839,
+STORE, 94454287859712, 94454287970303,
+STORE, 94454287970304, 94454290083839,
+ERASE, 94454287970304, 94454290083839,
+STORE, 94454290063360, 94454290075647,
+STORE, 94454290075648, 94454290083839,
+STORE, 140564947107840, 140564949360639,
+SNULL, 140564947251199, 140564949360639,
+STORE, 140564947107840, 140564947251199,
+STORE, 140564947251200, 140564949360639,
+ERASE, 140564947251200, 140564949360639,
+STORE, 140564949348352, 140564949356543,
+STORE, 140564949356544, 140564949360639,
+STORE, 140732168843264, 140732168847359,
+STORE, 140732168830976, 140732168843263,
+STORE, 140564949319680, 140564949348351,
+STORE, 140564949311488, 140564949319679,
+STORE, 140564943310848, 140564947107839,
+SNULL, 140564943310848, 140564944969727,
+STORE, 140564944969728, 140564947107839,
+STORE, 140564943310848, 140564944969727,
+SNULL, 140564947066879, 140564947107839,
+STORE, 140564944969728, 140564947066879,
+STORE, 140564947066880, 140564947107839,
+SNULL, 140564947066880, 140564947091455,
+STORE, 140564947091456, 140564947107839,
+STORE, 140564947066880, 140564947091455,
+ERASE, 140564947066880, 140564947091455,
+STORE, 140564947066880, 140564947091455,
+ERASE, 140564947091456, 140564947107839,
+STORE, 140564947091456, 140564947107839,
+SNULL, 140564947083263, 140564947091455,
+STORE, 140564947066880, 140564947083263,
+STORE, 140564947083264, 140564947091455,
+SNULL, 94454290071551, 94454290075647,
+STORE, 94454290063360, 94454290071551,
+STORE, 94454290071552, 94454290075647,
+SNULL, 140564949352447, 140564949356543,
+STORE, 140564949348352, 140564949352447,
+STORE, 140564949352448, 140564949356543,
+ERASE, 140564949319680, 140564949348351,
+STORE, 94454316236800, 94454316371967,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735155617792, 140737488351231,
+SNULL, 140735155625983, 140737488351231,
+STORE, 140735155617792, 140735155625983,
+STORE, 140735155486720, 140735155625983,
+STORE, 93915969556480, 93915971780607,
+SNULL, 93915969667071, 93915971780607,
+STORE, 93915969556480, 93915969667071,
+STORE, 93915969667072, 93915971780607,
+ERASE, 93915969667072, 93915971780607,
+STORE, 93915971760128, 93915971772415,
+STORE, 93915971772416, 93915971780607,
+STORE, 140141164605440, 140141166858239,
+SNULL, 140141164748799, 140141166858239,
+STORE, 140141164605440, 140141164748799,
+STORE, 140141164748800, 140141166858239,
+ERASE, 140141164748800, 140141166858239,
+STORE, 140141166845952, 140141166854143,
+STORE, 140141166854144, 140141166858239,
+STORE, 140735155691520, 140735155695615,
+STORE, 140735155679232, 140735155691519,
+STORE, 140141166817280, 140141166845951,
+STORE, 140141166809088, 140141166817279,
+STORE, 140141160808448, 140141164605439,
+SNULL, 140141160808448, 140141162467327,
+STORE, 140141162467328, 140141164605439,
+STORE, 140141160808448, 140141162467327,
+SNULL, 140141164564479, 140141164605439,
+STORE, 140141162467328, 140141164564479,
+STORE, 140141164564480, 140141164605439,
+SNULL, 140141164564480, 140141164589055,
+STORE, 140141164589056, 140141164605439,
+STORE, 140141164564480, 140141164589055,
+ERASE, 140141164564480, 140141164589055,
+STORE, 140141164564480, 140141164589055,
+ERASE, 140141164589056, 140141164605439,
+STORE, 140141164589056, 140141164605439,
+SNULL, 140141164580863, 140141164589055,
+STORE, 140141164564480, 140141164580863,
+STORE, 140141164580864, 140141164589055,
+SNULL, 93915971768319, 93915971772415,
+STORE, 93915971760128, 93915971768319,
+STORE, 93915971768320, 93915971772415,
+SNULL, 140141166850047, 140141166854143,
+STORE, 140141166845952, 140141166850047,
+STORE, 140141166850048, 140141166854143,
+ERASE, 140141166817280, 140141166845951,
+STORE, 93916002775040, 93916002910207,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728988409856, 140737488351231,
+SNULL, 140728988418047, 140737488351231,
+STORE, 140728988409856, 140728988418047,
+STORE, 140728988278784, 140728988418047,
+STORE, 94021634813952, 94021637038079,
+SNULL, 94021634924543, 94021637038079,
+STORE, 94021634813952, 94021634924543,
+STORE, 94021634924544, 94021637038079,
+ERASE, 94021634924544, 94021637038079,
+STORE, 94021637017600, 94021637029887,
+STORE, 94021637029888, 94021637038079,
+STORE, 140638014038016, 140638016290815,
+SNULL, 140638014181375, 140638016290815,
+STORE, 140638014038016, 140638014181375,
+STORE, 140638014181376, 140638016290815,
+ERASE, 140638014181376, 140638016290815,
+STORE, 140638016278528, 140638016286719,
+STORE, 140638016286720, 140638016290815,
+STORE, 140728988536832, 140728988540927,
+STORE, 140728988524544, 140728988536831,
+STORE, 140638016249856, 140638016278527,
+STORE, 140638016241664, 140638016249855,
+STORE, 140638010241024, 140638014038015,
+SNULL, 140638010241024, 140638011899903,
+STORE, 140638011899904, 140638014038015,
+STORE, 140638010241024, 140638011899903,
+SNULL, 140638013997055, 140638014038015,
+STORE, 140638011899904, 140638013997055,
+STORE, 140638013997056, 140638014038015,
+SNULL, 140638013997056, 140638014021631,
+STORE, 140638014021632, 140638014038015,
+STORE, 140638013997056, 140638014021631,
+ERASE, 140638013997056, 140638014021631,
+STORE, 140638013997056, 140638014021631,
+ERASE, 140638014021632, 140638014038015,
+STORE, 140638014021632, 140638014038015,
+SNULL, 140638014013439, 140638014021631,
+STORE, 140638013997056, 140638014013439,
+STORE, 140638014013440, 140638014021631,
+SNULL, 94021637025791, 94021637029887,
+STORE, 94021637017600, 94021637025791,
+STORE, 94021637025792, 94021637029887,
+SNULL, 140638016282623, 140638016286719,
+STORE, 140638016278528, 140638016282623,
+STORE, 140638016282624, 140638016286719,
+ERASE, 140638016249856, 140638016278527,
+STORE, 94021643124736, 94021643259903,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731219275776, 140737488351231,
+SNULL, 140731219283967, 140737488351231,
+STORE, 140731219275776, 140731219283967,
+STORE, 140731219144704, 140731219283967,
+STORE, 93888803647488, 93888805871615,
+SNULL, 93888803758079, 93888805871615,
+STORE, 93888803647488, 93888803758079,
+STORE, 93888803758080, 93888805871615,
+ERASE, 93888803758080, 93888805871615,
+STORE, 93888805851136, 93888805863423,
+STORE, 93888805863424, 93888805871615,
+STORE, 139630576934912, 139630579187711,
+SNULL, 139630577078271, 139630579187711,
+STORE, 139630576934912, 139630577078271,
+STORE, 139630577078272, 139630579187711,
+ERASE, 139630577078272, 139630579187711,
+STORE, 139630579175424, 139630579183615,
+STORE, 139630579183616, 139630579187711,
+STORE, 140731219718144, 140731219722239,
+STORE, 140731219705856, 140731219718143,
+STORE, 139630579146752, 139630579175423,
+STORE, 139630579138560, 139630579146751,
+STORE, 139630573137920, 139630576934911,
+SNULL, 139630573137920, 139630574796799,
+STORE, 139630574796800, 139630576934911,
+STORE, 139630573137920, 139630574796799,
+SNULL, 139630576893951, 139630576934911,
+STORE, 139630574796800, 139630576893951,
+STORE, 139630576893952, 139630576934911,
+SNULL, 139630576893952, 139630576918527,
+STORE, 139630576918528, 139630576934911,
+STORE, 139630576893952, 139630576918527,
+ERASE, 139630576893952, 139630576918527,
+STORE, 139630576893952, 139630576918527,
+ERASE, 139630576918528, 139630576934911,
+STORE, 139630576918528, 139630576934911,
+SNULL, 139630576910335, 139630576918527,
+STORE, 139630576893952, 139630576910335,
+STORE, 139630576910336, 139630576918527,
+SNULL, 93888805859327, 93888805863423,
+STORE, 93888805851136, 93888805859327,
+STORE, 93888805859328, 93888805863423,
+SNULL, 139630579179519, 139630579183615,
+STORE, 139630579175424, 139630579179519,
+STORE, 139630579179520, 139630579183615,
+ERASE, 139630579146752, 139630579175423,
+STORE, 93888822235136, 93888822370303,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733391151104, 140737488351231,
+SNULL, 140733391159295, 140737488351231,
+STORE, 140733391151104, 140733391159295,
+STORE, 140733391020032, 140733391159295,
+STORE, 94393875324928, 94393877549055,
+SNULL, 94393875435519, 94393877549055,
+STORE, 94393875324928, 94393875435519,
+STORE, 94393875435520, 94393877549055,
+ERASE, 94393875435520, 94393877549055,
+STORE, 94393877528576, 94393877540863,
+STORE, 94393877540864, 94393877549055,
+STORE, 140292111740928, 140292113993727,
+SNULL, 140292111884287, 140292113993727,
+STORE, 140292111740928, 140292111884287,
+STORE, 140292111884288, 140292113993727,
+ERASE, 140292111884288, 140292113993727,
+STORE, 140292113981440, 140292113989631,
+STORE, 140292113989632, 140292113993727,
+STORE, 140733391532032, 140733391536127,
+STORE, 140733391519744, 140733391532031,
+STORE, 140292113952768, 140292113981439,
+STORE, 140292113944576, 140292113952767,
+STORE, 140292107943936, 140292111740927,
+SNULL, 140292107943936, 140292109602815,
+STORE, 140292109602816, 140292111740927,
+STORE, 140292107943936, 140292109602815,
+SNULL, 140292111699967, 140292111740927,
+STORE, 140292109602816, 140292111699967,
+STORE, 140292111699968, 140292111740927,
+SNULL, 140292111699968, 140292111724543,
+STORE, 140292111724544, 140292111740927,
+STORE, 140292111699968, 140292111724543,
+ERASE, 140292111699968, 140292111724543,
+STORE, 140292111699968, 140292111724543,
+ERASE, 140292111724544, 140292111740927,
+STORE, 140292111724544, 140292111740927,
+SNULL, 140292111716351, 140292111724543,
+STORE, 140292111699968, 140292111716351,
+STORE, 140292111716352, 140292111724543,
+SNULL, 94393877536767, 94393877540863,
+STORE, 94393877528576, 94393877536767,
+STORE, 94393877536768, 94393877540863,
+SNULL, 140292113985535, 140292113989631,
+STORE, 140292113981440, 140292113985535,
+STORE, 140292113985536, 140292113989631,
+ERASE, 140292113952768, 140292113981439,
+STORE, 94393909342208, 94393909477375,
+STORE, 94458367512576, 94458367725567,
+STORE, 94458369822720, 94458369826815,
+STORE, 94458369826816, 94458369835007,
+STORE, 94458369835008, 94458369847295,
+STORE, 94458393292800, 94458399666175,
+STORE, 140619773841408, 140619775500287,
+STORE, 140619775500288, 140619777597439,
+STORE, 140619777597440, 140619777613823,
+STORE, 140619777613824, 140619777622015,
+STORE, 140619777622016, 140619777638399,
+STORE, 140619777638400, 140619777650687,
+STORE, 140619777650688, 140619779743743,
+STORE, 140619779743744, 140619779747839,
+STORE, 140619779747840, 140619779751935,
+STORE, 140619779751936, 140619779895295,
+STORE, 140619780263936, 140619781947391,
+STORE, 140619781947392, 140619781963775,
+STORE, 140619781992448, 140619781996543,
+STORE, 140619781996544, 140619782000639,
+STORE, 140619782000640, 140619782004735,
+STORE, 140725811675136, 140725811814399,
+STORE, 140725812813824, 140725812826111,
+STORE, 140725812826112, 140725812830207,
+STORE, 94458367512576, 94458367725567,
+STORE, 94458369822720, 94458369826815,
+STORE, 94458369826816, 94458369835007,
+STORE, 94458369835008, 94458369847295,
+STORE, 94458393292800, 94458400366591,
+STORE, 140619773841408, 140619775500287,
+STORE, 140619775500288, 140619777597439,
+STORE, 140619777597440, 140619777613823,
+STORE, 140619777613824, 140619777622015,
+STORE, 140619777622016, 140619777638399,
+STORE, 140619777638400, 140619777650687,
+STORE, 140619777650688, 140619779743743,
+STORE, 140619779743744, 140619779747839,
+STORE, 140619779747840, 140619779751935,
+STORE, 140619779751936, 140619779895295,
+STORE, 140619780263936, 140619781947391,
+STORE, 140619781947392, 140619781963775,
+STORE, 140619781992448, 140619781996543,
+STORE, 140619781996544, 140619782000639,
+STORE, 140619782000640, 140619782004735,
+STORE, 140725811675136, 140725811814399,
+STORE, 140725812813824, 140725812826111,
+STORE, 140725812826112, 140725812830207,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728740679680, 140737488351231,
+SNULL, 140728740687871, 140737488351231,
+STORE, 140728740679680, 140728740687871,
+STORE, 140728740548608, 140728740687871,
+STORE, 94764075249664, 94764077473791,
+SNULL, 94764075360255, 94764077473791,
+STORE, 94764075249664, 94764075360255,
+STORE, 94764075360256, 94764077473791,
+ERASE, 94764075360256, 94764077473791,
+STORE, 94764077453312, 94764077465599,
+STORE, 94764077465600, 94764077473791,
+STORE, 139766406791168, 139766409043967,
+SNULL, 139766406934527, 139766409043967,
+STORE, 139766406791168, 139766406934527,
+STORE, 139766406934528, 139766409043967,
+ERASE, 139766406934528, 139766409043967,
+STORE, 139766409031680, 139766409039871,
+STORE, 139766409039872, 139766409043967,
+STORE, 140728740913152, 140728740917247,
+STORE, 140728740900864, 140728740913151,
+STORE, 139766409003008, 139766409031679,
+STORE, 139766408994816, 139766409003007,
+STORE, 139766402994176, 139766406791167,
+SNULL, 139766402994176, 139766404653055,
+STORE, 139766404653056, 139766406791167,
+STORE, 139766402994176, 139766404653055,
+SNULL, 139766406750207, 139766406791167,
+STORE, 139766404653056, 139766406750207,
+STORE, 139766406750208, 139766406791167,
+SNULL, 139766406750208, 139766406774783,
+STORE, 139766406774784, 139766406791167,
+STORE, 139766406750208, 139766406774783,
+ERASE, 139766406750208, 139766406774783,
+STORE, 139766406750208, 139766406774783,
+ERASE, 139766406774784, 139766406791167,
+STORE, 139766406774784, 139766406791167,
+SNULL, 139766406766591, 139766406774783,
+STORE, 139766406750208, 139766406766591,
+STORE, 139766406766592, 139766406774783,
+SNULL, 94764077461503, 94764077465599,
+STORE, 94764077453312, 94764077461503,
+STORE, 94764077461504, 94764077465599,
+SNULL, 139766409035775, 139766409039871,
+STORE, 139766409031680, 139766409035775,
+STORE, 139766409035776, 139766409039871,
+ERASE, 139766409003008, 139766409031679,
+STORE, 94764090458112, 94764090593279,
+STORE, 94758057480192, 94758057590783,
+STORE, 94758059683840, 94758059692031,
+STORE, 94758059692032, 94758059696127,
+STORE, 94758059696128, 94758059704319,
+STORE, 94758083215360, 94758083350527,
+STORE, 139951456772096, 139951458430975,
+STORE, 139951458430976, 139951460528127,
+STORE, 139951460528128, 139951460544511,
+STORE, 139951460544512, 139951460552703,
+STORE, 139951460552704, 139951460569087,
+STORE, 139951460569088, 139951460712447,
+STORE, 139951462772736, 139951462780927,
+STORE, 139951462809600, 139951462813695,
+STORE, 139951462813696, 139951462817791,
+STORE, 139951462817792, 139951462821887,
+STORE, 140734098313216, 140734098452479,
+STORE, 140734098911232, 140734098923519,
+STORE, 140734098923520, 140734098927615,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724904095744, 140737488351231,
+SNULL, 140724904103935, 140737488351231,
+STORE, 140724904095744, 140724904103935,
+STORE, 140724903964672, 140724904103935,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 140408497864704, 140408500117503,
+SNULL, 140408498008063, 140408500117503,
+STORE, 140408497864704, 140408498008063,
+STORE, 140408498008064, 140408500117503,
+ERASE, 140408498008064, 140408500117503,
+STORE, 140408500105216, 140408500113407,
+STORE, 140408500113408, 140408500117503,
+STORE, 140724905369600, 140724905373695,
+STORE, 140724905357312, 140724905369599,
+STORE, 140408500076544, 140408500105215,
+STORE, 140408500068352, 140408500076543,
+STORE, 140408494702592, 140408497864703,
+SNULL, 140408494702592, 140408495763455,
+STORE, 140408495763456, 140408497864703,
+STORE, 140408494702592, 140408495763455,
+SNULL, 140408497856511, 140408497864703,
+STORE, 140408495763456, 140408497856511,
+STORE, 140408497856512, 140408497864703,
+ERASE, 140408497856512, 140408497864703,
+STORE, 140408497856512, 140408497864703,
+STORE, 140408490905600, 140408494702591,
+SNULL, 140408490905600, 140408492564479,
+STORE, 140408492564480, 140408494702591,
+STORE, 140408490905600, 140408492564479,
+SNULL, 140408494661631, 140408494702591,
+STORE, 140408492564480, 140408494661631,
+STORE, 140408494661632, 140408494702591,
+SNULL, 140408494661632, 140408494686207,
+STORE, 140408494686208, 140408494702591,
+STORE, 140408494661632, 140408494686207,
+ERASE, 140408494661632, 140408494686207,
+STORE, 140408494661632, 140408494686207,
+ERASE, 140408494686208, 140408494702591,
+STORE, 140408494686208, 140408494702591,
+STORE, 140408500056064, 140408500076543,
+SNULL, 140408494678015, 140408494686207,
+STORE, 140408494661632, 140408494678015,
+STORE, 140408494678016, 140408494686207,
+SNULL, 140408497860607, 140408497864703,
+STORE, 140408497856512, 140408497860607,
+STORE, 140408497860608, 140408497864703,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 140408500109311, 140408500113407,
+STORE, 140408500105216, 140408500109311,
+STORE, 140408500109312, 140408500113407,
+ERASE, 140408500076544, 140408500105215,
+STORE, 25235456, 25370623,
+STORE, 25235456, 25518079,
+STORE, 140408498372608, 140408500056063,
+STORE, 94543937388544, 94543937499135,
+STORE, 94543939592192, 94543939600383,
+STORE, 94543939600384, 94543939604479,
+STORE, 94543939604480, 94543939612671,
+STORE, 94543941447680, 94543941582847,
+STORE, 140282621947904, 140282623606783,
+STORE, 140282623606784, 140282625703935,
+STORE, 140282625703936, 140282625720319,
+STORE, 140282625720320, 140282625728511,
+STORE, 140282625728512, 140282625744895,
+STORE, 140282625744896, 140282625888255,
+STORE, 140282627948544, 140282627956735,
+STORE, 140282627985408, 140282627989503,
+STORE, 140282627989504, 140282627993599,
+STORE, 140282627993600, 140282627997695,
+STORE, 140728295723008, 140728295862271,
+STORE, 140728296476672, 140728296488959,
+STORE, 140728296488960, 140728296493055,
+STORE, 94431504838656, 94431505051647,
+STORE, 94431507148800, 94431507152895,
+STORE, 94431507152896, 94431507161087,
+STORE, 94431507161088, 94431507173375,
+STORE, 94431510286336, 94431510691839,
+STORE, 139818797948928, 139818799607807,
+STORE, 139818799607808, 139818801704959,
+STORE, 139818801704960, 139818801721343,
+STORE, 139818801721344, 139818801729535,
+STORE, 139818801729536, 139818801745919,
+STORE, 139818801745920, 139818801758207,
+STORE, 139818801758208, 139818803851263,
+STORE, 139818803851264, 139818803855359,
+STORE, 139818803855360, 139818803859455,
+STORE, 139818803859456, 139818804002815,
+STORE, 139818804371456, 139818806054911,
+STORE, 139818806054912, 139818806071295,
+STORE, 139818806099968, 139818806104063,
+STORE, 139818806104064, 139818806108159,
+STORE, 139818806108160, 139818806112255,
+STORE, 140731430457344, 140731430596607,
+STORE, 140731431227392, 140731431239679,
+STORE, 140731431239680, 140731431243775,
+STORE, 94431504838656, 94431505051647,
+STORE, 94431507148800, 94431507152895,
+STORE, 94431507152896, 94431507161087,
+STORE, 94431507161088, 94431507173375,
+STORE, 94431510286336, 94431510691839,
+STORE, 139818797948928, 139818799607807,
+STORE, 139818799607808, 139818801704959,
+STORE, 139818801704960, 139818801721343,
+STORE, 139818801721344, 139818801729535,
+STORE, 139818801729536, 139818801745919,
+STORE, 139818801745920, 139818801758207,
+STORE, 139818801758208, 139818803851263,
+STORE, 139818803851264, 139818803855359,
+STORE, 139818803855360, 139818803859455,
+STORE, 139818803859456, 139818804002815,
+STORE, 139818804371456, 139818806054911,
+STORE, 139818806054912, 139818806071295,
+STORE, 139818806099968, 139818806104063,
+STORE, 139818806104064, 139818806108159,
+STORE, 139818806108160, 139818806112255,
+STORE, 140731430457344, 140731430596607,
+STORE, 140731431227392, 140731431239679,
+STORE, 140731431239680, 140731431243775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140737488338944, 140737488351231,
+STORE, 140736944451584, 140737488351231,
+SNULL, 140736944463871, 140737488351231,
+STORE, 140736944451584, 140736944463871,
+STORE, 140736944320512, 140736944463871,
+STORE, 4194304, 26279935,
+STORE, 28372992, 28454911,
+STORE, 28454912, 29806591,
+STORE, 139693609893888, 139693612146687,
+SNULL, 139693610037247, 139693612146687,
+STORE, 139693609893888, 139693610037247,
+STORE, 139693610037248, 139693612146687,
+ERASE, 139693610037248, 139693612146687,
+STORE, 139693612134400, 139693612142591,
+STORE, 139693612142592, 139693612146687,
+STORE, 140736945152000, 140736945156095,
+STORE, 140736945139712, 140736945151999,
+STORE, 139693612105728, 139693612134399,
+STORE, 139693612097536, 139693612105727,
+STORE, 139693606060032, 139693609893887,
+SNULL, 139693606060032, 139693607768063,
+STORE, 139693607768064, 139693609893887,
+STORE, 139693606060032, 139693607768063,
+SNULL, 139693609861119, 139693609893887,
+STORE, 139693607768064, 139693609861119,
+STORE, 139693609861120, 139693609893887,
+ERASE, 139693609861120, 139693609893887,
+STORE, 139693609861120, 139693609893887,
+STORE, 139693603864576, 139693606060031,
+SNULL, 139693603864576, 139693603958783,
+STORE, 139693603958784, 139693606060031,
+STORE, 139693603864576, 139693603958783,
+SNULL, 139693606051839, 139693606060031,
+STORE, 139693603958784, 139693606051839,
+STORE, 139693606051840, 139693606060031,
+ERASE, 139693606051840, 139693606060031,
+STORE, 139693606051840, 139693606060031,
+STORE, 139693601345536, 139693603864575,
+SNULL, 139693601345536, 139693601759231,
+STORE, 139693601759232, 139693603864575,
+STORE, 139693601345536, 139693601759231,
+SNULL, 139693603852287, 139693603864575,
+STORE, 139693601759232, 139693603852287,
+STORE, 139693603852288, 139693603864575,
+ERASE, 139693603852288, 139693603864575,
+STORE, 139693603852288, 139693603864575,
+STORE, 139693598711808, 139693601345535,
+SNULL, 139693598711808, 139693599240191,
+STORE, 139693599240192, 139693601345535,
+STORE, 139693598711808, 139693599240191,
+SNULL, 139693601337343, 139693601345535,
+STORE, 139693599240192, 139693601337343,
+STORE, 139693601337344, 139693601345535,
+ERASE, 139693601337344, 139693601345535,
+STORE, 139693601337344, 139693601345535,
+STORE, 139693596598272, 139693598711807,
+SNULL, 139693596598272, 139693596610559,
+STORE, 139693596610560, 139693598711807,
+STORE, 139693596598272, 139693596610559,
+SNULL, 139693598703615, 139693598711807,
+STORE, 139693596610560, 139693598703615,
+STORE, 139693598703616, 139693598711807,
+ERASE, 139693598703616, 139693598711807,
+STORE, 139693598703616, 139693598711807,
+STORE, 139693594394624, 139693596598271,
+SNULL, 139693594394624, 139693594497023,
+STORE, 139693594497024, 139693596598271,
+STORE, 139693594394624, 139693594497023,
+SNULL, 139693596590079, 139693596598271,
+STORE, 139693594497024, 139693596590079,
+STORE, 139693596590080, 139693596598271,
+ERASE, 139693596590080, 139693596598271,
+STORE, 139693596590080, 139693596598271,
+STORE, 139693612089344, 139693612105727,
+STORE, 139693591232512, 139693594394623,
+SNULL, 139693591232512, 139693592293375,
+STORE, 139693592293376, 139693594394623,
+STORE, 139693591232512, 139693592293375,
+SNULL, 139693594386431, 139693594394623,
+STORE, 139693592293376, 139693594386431,
+STORE, 139693594386432, 139693594394623,
+ERASE, 139693594386432, 139693594394623,
+STORE, 139693594386432, 139693594394623,
+STORE, 139693587435520, 139693591232511,
+SNULL, 139693587435520, 139693589094399,
+STORE, 139693589094400, 139693591232511,
+STORE, 139693587435520, 139693589094399,
+SNULL, 139693591191551, 139693591232511,
+STORE, 139693589094400, 139693591191551,
+STORE, 139693591191552, 139693591232511,
+SNULL, 139693591191552, 139693591216127,
+STORE, 139693591216128, 139693591232511,
+STORE, 139693591191552, 139693591216127,
+ERASE, 139693591191552, 139693591216127,
+STORE, 139693591191552, 139693591216127,
+ERASE, 139693591216128, 139693591232511,
+STORE, 139693591216128, 139693591232511,
+STORE, 139693612077056, 139693612105727,
+SNULL, 139693591207935, 139693591216127,
+STORE, 139693591191552, 139693591207935,
+STORE, 139693591207936, 139693591216127,
+SNULL, 139693594390527, 139693594394623,
+STORE, 139693594386432, 139693594390527,
+STORE, 139693594390528, 139693594394623,
+SNULL, 139693596594175, 139693596598271,
+STORE, 139693596590080, 139693596594175,
+STORE, 139693596594176, 139693596598271,
+SNULL, 139693598707711, 139693598711807,
+STORE, 139693598703616, 139693598707711,
+STORE, 139693598707712, 139693598711807,
+SNULL, 139693601341439, 139693601345535,
+STORE, 139693601337344, 139693601341439,
+STORE, 139693601341440, 139693601345535,
+SNULL, 139693603860479, 139693603864575,
+STORE, 139693603852288, 139693603860479,
+STORE, 139693603860480, 139693603864575,
+SNULL, 139693606055935, 139693606060031,
+STORE, 139693606051840, 139693606055935,
+STORE, 139693606055936, 139693606060031,
+SNULL, 139693609865215, 139693609893887,
+STORE, 139693609861120, 139693609865215,
+STORE, 139693609865216, 139693609893887,
+SNULL, 28405759, 28454911,
+STORE, 28372992, 28405759,
+STORE, 28405760, 28454911,
+SNULL, 139693612138495, 139693612142591,
+STORE, 139693612134400, 139693612138495,
+STORE, 139693612138496, 139693612142591,
+ERASE, 139693612105728, 139693612134399,
+STORE, 39976960, 40112127,
+STORE, 139693610393600, 139693612077055,
+STORE, 139693612130304, 139693612134399,
+STORE, 139693610258432, 139693610393599,
+STORE, 39976960, 40255487,
+STORE, 139693585338368, 139693587435519,
+STORE, 139693612122112, 139693612134399,
+STORE, 139693612113920, 139693612134399,
+STORE, 139693612077056, 139693612113919,
+STORE, 139693610242048, 139693610393599,
+STORE, 39976960, 40390655,
+STORE, 39976960, 40546303,
+STORE, 139693610233856, 139693610393599,
+STORE, 139693610225664, 139693610393599,
+STORE, 39976960, 40714239,
+STORE, 139693610209280, 139693610393599,
+STORE, 39976960, 40861695,
+STORE, 94431504838656, 94431505051647,
+STORE, 94431507148800, 94431507152895,
+STORE, 94431507152896, 94431507161087,
+STORE, 94431507161088, 94431507173375,
+STORE, 94431510286336, 94431528759295,
+STORE, 139818797948928, 139818799607807,
+STORE, 139818799607808, 139818801704959,
+STORE, 139818801704960, 139818801721343,
+STORE, 139818801721344, 139818801729535,
+STORE, 139818801729536, 139818801745919,
+STORE, 139818801745920, 139818801758207,
+STORE, 139818801758208, 139818803851263,
+STORE, 139818803851264, 139818803855359,
+STORE, 139818803855360, 139818803859455,
+STORE, 139818803859456, 139818804002815,
+STORE, 139818804371456, 139818806054911,
+STORE, 139818806054912, 139818806071295,
+STORE, 139818806099968, 139818806104063,
+STORE, 139818806104064, 139818806108159,
+STORE, 139818806108160, 139818806112255,
+STORE, 140731430457344, 140731430596607,
+STORE, 140731431227392, 140731431239679,
+STORE, 140731431239680, 140731431243775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140729993904128, 140737488351231,
+SNULL, 140729993912319, 140737488351231,
+STORE, 140729993904128, 140729993912319,
+STORE, 140729993773056, 140729993912319,
+STORE, 93926271991808, 93926274215935,
+SNULL, 93926272102399, 93926274215935,
+STORE, 93926271991808, 93926272102399,
+STORE, 93926272102400, 93926274215935,
+ERASE, 93926272102400, 93926274215935,
+STORE, 93926274195456, 93926274207743,
+STORE, 93926274207744, 93926274215935,
+STORE, 139962167296000, 139962169548799,
+SNULL, 139962167439359, 139962169548799,
+STORE, 139962167296000, 139962167439359,
+STORE, 139962167439360, 139962169548799,
+ERASE, 139962167439360, 139962169548799,
+STORE, 139962169536512, 139962169544703,
+STORE, 139962169544704, 139962169548799,
+STORE, 140729995096064, 140729995100159,
+STORE, 140729995083776, 140729995096063,
+STORE, 139962169507840, 139962169536511,
+STORE, 139962169499648, 139962169507839,
+STORE, 139962163499008, 139962167295999,
+SNULL, 139962163499008, 139962165157887,
+STORE, 139962165157888, 139962167295999,
+STORE, 139962163499008, 139962165157887,
+SNULL, 139962167255039, 139962167295999,
+STORE, 139962165157888, 139962167255039,
+STORE, 139962167255040, 139962167295999,
+SNULL, 139962167255040, 139962167279615,
+STORE, 139962167279616, 139962167295999,
+STORE, 139962167255040, 139962167279615,
+ERASE, 139962167255040, 139962167279615,
+STORE, 139962167255040, 139962167279615,
+ERASE, 139962167279616, 139962167295999,
+STORE, 139962167279616, 139962167295999,
+SNULL, 139962167271423, 139962167279615,
+STORE, 139962167255040, 139962167271423,
+STORE, 139962167271424, 139962167279615,
+SNULL, 93926274203647, 93926274207743,
+STORE, 93926274195456, 93926274203647,
+STORE, 93926274203648, 93926274207743,
+SNULL, 139962169540607, 139962169544703,
+STORE, 139962169536512, 139962169540607,
+STORE, 139962169540608, 139962169544703,
+ERASE, 139962169507840, 139962169536511,
+STORE, 93926291120128, 93926291255295,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724960579584, 140737488351231,
+SNULL, 140724960587775, 140737488351231,
+STORE, 140724960579584, 140724960587775,
+STORE, 140724960448512, 140724960587775,
+STORE, 94246489489408, 94246491713535,
+SNULL, 94246489599999, 94246491713535,
+STORE, 94246489489408, 94246489599999,
+STORE, 94246489600000, 94246491713535,
+ERASE, 94246489600000, 94246491713535,
+STORE, 94246491693056, 94246491705343,
+STORE, 94246491705344, 94246491713535,
+STORE, 140098174926848, 140098177179647,
+SNULL, 140098175070207, 140098177179647,
+STORE, 140098174926848, 140098175070207,
+STORE, 140098175070208, 140098177179647,
+ERASE, 140098175070208, 140098177179647,
+STORE, 140098177167360, 140098177175551,
+STORE, 140098177175552, 140098177179647,
+STORE, 140724961439744, 140724961443839,
+STORE, 140724961427456, 140724961439743,
+STORE, 140098177138688, 140098177167359,
+STORE, 140098177130496, 140098177138687,
+STORE, 140098171129856, 140098174926847,
+SNULL, 140098171129856, 140098172788735,
+STORE, 140098172788736, 140098174926847,
+STORE, 140098171129856, 140098172788735,
+SNULL, 140098174885887, 140098174926847,
+STORE, 140098172788736, 140098174885887,
+STORE, 140098174885888, 140098174926847,
+SNULL, 140098174885888, 140098174910463,
+STORE, 140098174910464, 140098174926847,
+STORE, 140098174885888, 140098174910463,
+ERASE, 140098174885888, 140098174910463,
+STORE, 140098174885888, 140098174910463,
+ERASE, 140098174910464, 140098174926847,
+STORE, 140098174910464, 140098174926847,
+SNULL, 140098174902271, 140098174910463,
+STORE, 140098174885888, 140098174902271,
+STORE, 140098174902272, 140098174910463,
+SNULL, 94246491701247, 94246491705343,
+STORE, 94246491693056, 94246491701247,
+STORE, 94246491701248, 94246491705343,
+SNULL, 140098177171455, 140098177175551,
+STORE, 140098177167360, 140098177171455,
+STORE, 140098177171456, 140098177175551,
+ERASE, 140098177138688, 140098177167359,
+STORE, 94246516998144, 94246517133311,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140730522918912, 140737488351231,
+SNULL, 140730522927103, 140737488351231,
+STORE, 140730522918912, 140730522927103,
+STORE, 140730522787840, 140730522927103,
+STORE, 94196043120640, 94196045344767,
+SNULL, 94196043231231, 94196045344767,
+STORE, 94196043120640, 94196043231231,
+STORE, 94196043231232, 94196045344767,
+ERASE, 94196043231232, 94196045344767,
+STORE, 94196045324288, 94196045336575,
+STORE, 94196045336576, 94196045344767,
+STORE, 139815918940160, 139815921192959,
+SNULL, 139815919083519, 139815921192959,
+STORE, 139815918940160, 139815919083519,
+STORE, 139815919083520, 139815921192959,
+ERASE, 139815919083520, 139815921192959,
+STORE, 139815921180672, 139815921188863,
+STORE, 139815921188864, 139815921192959,
+STORE, 140730523344896, 140730523348991,
+STORE, 140730523332608, 140730523344895,
+STORE, 139815921152000, 139815921180671,
+STORE, 139815921143808, 139815921151999,
+STORE, 139815915143168, 139815918940159,
+SNULL, 139815915143168, 139815916802047,
+STORE, 139815916802048, 139815918940159,
+STORE, 139815915143168, 139815916802047,
+SNULL, 139815918899199, 139815918940159,
+STORE, 139815916802048, 139815918899199,
+STORE, 139815918899200, 139815918940159,
+SNULL, 139815918899200, 139815918923775,
+STORE, 139815918923776, 139815918940159,
+STORE, 139815918899200, 139815918923775,
+ERASE, 139815918899200, 139815918923775,
+STORE, 139815918899200, 139815918923775,
+ERASE, 139815918923776, 139815918940159,
+STORE, 139815918923776, 139815918940159,
+SNULL, 139815918915583, 139815918923775,
+STORE, 139815918899200, 139815918915583,
+STORE, 139815918915584, 139815918923775,
+SNULL, 94196045332479, 94196045336575,
+STORE, 94196045324288, 94196045332479,
+STORE, 94196045332480, 94196045336575,
+SNULL, 139815921184767, 139815921188863,
+STORE, 139815921180672, 139815921184767,
+STORE, 139815921184768, 139815921188863,
+ERASE, 139815921152000, 139815921180671,
+STORE, 94196076183552, 94196076318719,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722460393472, 140737488351231,
+SNULL, 140722460401663, 140737488351231,
+STORE, 140722460393472, 140722460401663,
+STORE, 140722460262400, 140722460401663,
+STORE, 94569810399232, 94569812623359,
+SNULL, 94569810509823, 94569812623359,
+STORE, 94569810399232, 94569810509823,
+STORE, 94569810509824, 94569812623359,
+ERASE, 94569810509824, 94569812623359,
+STORE, 94569812602880, 94569812615167,
+STORE, 94569812615168, 94569812623359,
+STORE, 139681565450240, 139681567703039,
+SNULL, 139681565593599, 139681567703039,
+STORE, 139681565450240, 139681565593599,
+STORE, 139681565593600, 139681567703039,
+ERASE, 139681565593600, 139681567703039,
+STORE, 139681567690752, 139681567698943,
+STORE, 139681567698944, 139681567703039,
+STORE, 140722460569600, 140722460573695,
+STORE, 140722460557312, 140722460569599,
+STORE, 139681567662080, 139681567690751,
+STORE, 139681567653888, 139681567662079,
+STORE, 139681561653248, 139681565450239,
+SNULL, 139681561653248, 139681563312127,
+STORE, 139681563312128, 139681565450239,
+STORE, 139681561653248, 139681563312127,
+SNULL, 139681565409279, 139681565450239,
+STORE, 139681563312128, 139681565409279,
+STORE, 139681565409280, 139681565450239,
+SNULL, 139681565409280, 139681565433855,
+STORE, 139681565433856, 139681565450239,
+STORE, 139681565409280, 139681565433855,
+ERASE, 139681565409280, 139681565433855,
+STORE, 139681565409280, 139681565433855,
+ERASE, 139681565433856, 139681565450239,
+STORE, 139681565433856, 139681565450239,
+SNULL, 139681565425663, 139681565433855,
+STORE, 139681565409280, 139681565425663,
+STORE, 139681565425664, 139681565433855,
+SNULL, 94569812611071, 94569812615167,
+STORE, 94569812602880, 94569812611071,
+STORE, 94569812611072, 94569812615167,
+SNULL, 139681567694847, 139681567698943,
+STORE, 139681567690752, 139681567694847,
+STORE, 139681567694848, 139681567698943,
+ERASE, 139681567662080, 139681567690751,
+STORE, 94569818066944, 94569818202111,
+STORE, 94431504838656, 94431505051647,
+STORE, 94431507148800, 94431507152895,
+STORE, 94431507152896, 94431507161087,
+STORE, 94431507161088, 94431507173375,
+STORE, 94431510286336, 94431534280703,
+STORE, 139818797948928, 139818799607807,
+STORE, 139818799607808, 139818801704959,
+STORE, 139818801704960, 139818801721343,
+STORE, 139818801721344, 139818801729535,
+STORE, 139818801729536, 139818801745919,
+STORE, 139818801745920, 139818801758207,
+STORE, 139818801758208, 139818803851263,
+STORE, 139818803851264, 139818803855359,
+STORE, 139818803855360, 139818803859455,
+STORE, 139818803859456, 139818804002815,
+STORE, 139818804371456, 139818806054911,
+STORE, 139818806054912, 139818806071295,
+STORE, 139818806099968, 139818806104063,
+STORE, 139818806104064, 139818806108159,
+STORE, 139818806108160, 139818806112255,
+STORE, 140731430457344, 140731430596607,
+STORE, 140731431227392, 140731431239679,
+STORE, 140731431239680, 140731431243775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140725452365824, 140737488351231,
+SNULL, 140725452374015, 140737488351231,
+STORE, 140725452365824, 140725452374015,
+STORE, 140725452234752, 140725452374015,
+STORE, 94395067465728, 94395069689855,
+SNULL, 94395067576319, 94395069689855,
+STORE, 94395067465728, 94395067576319,
+STORE, 94395067576320, 94395069689855,
+ERASE, 94395067576320, 94395069689855,
+STORE, 94395069669376, 94395069681663,
+STORE, 94395069681664, 94395069689855,
+STORE, 140269941211136, 140269943463935,
+SNULL, 140269941354495, 140269943463935,
+STORE, 140269941211136, 140269941354495,
+STORE, 140269941354496, 140269943463935,
+ERASE, 140269941354496, 140269943463935,
+STORE, 140269943451648, 140269943459839,
+STORE, 140269943459840, 140269943463935,
+STORE, 140725452558336, 140725452562431,
+STORE, 140725452546048, 140725452558335,
+STORE, 140269943422976, 140269943451647,
+STORE, 140269943414784, 140269943422975,
+STORE, 140269937414144, 140269941211135,
+SNULL, 140269937414144, 140269939073023,
+STORE, 140269939073024, 140269941211135,
+STORE, 140269937414144, 140269939073023,
+SNULL, 140269941170175, 140269941211135,
+STORE, 140269939073024, 140269941170175,
+STORE, 140269941170176, 140269941211135,
+SNULL, 140269941170176, 140269941194751,
+STORE, 140269941194752, 140269941211135,
+STORE, 140269941170176, 140269941194751,
+ERASE, 140269941170176, 140269941194751,
+STORE, 140269941170176, 140269941194751,
+ERASE, 140269941194752, 140269941211135,
+STORE, 140269941194752, 140269941211135,
+SNULL, 140269941186559, 140269941194751,
+STORE, 140269941170176, 140269941186559,
+STORE, 140269941186560, 140269941194751,
+SNULL, 94395069677567, 94395069681663,
+STORE, 94395069669376, 94395069677567,
+STORE, 94395069677568, 94395069681663,
+SNULL, 140269943455743, 140269943459839,
+STORE, 140269943451648, 140269943455743,
+STORE, 140269943455744, 140269943459839,
+ERASE, 140269943422976, 140269943451647,
+STORE, 94395101691904, 94395101827071,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733860118528, 140737488351231,
+SNULL, 140733860126719, 140737488351231,
+STORE, 140733860118528, 140733860126719,
+STORE, 140733859987456, 140733860126719,
+STORE, 94484752990208, 94484755214335,
+SNULL, 94484753100799, 94484755214335,
+STORE, 94484752990208, 94484753100799,
+STORE, 94484753100800, 94484755214335,
+ERASE, 94484753100800, 94484755214335,
+STORE, 94484755193856, 94484755206143,
+STORE, 94484755206144, 94484755214335,
+STORE, 139958922309632, 139958924562431,
+SNULL, 139958922452991, 139958924562431,
+STORE, 139958922309632, 139958922452991,
+STORE, 139958922452992, 139958924562431,
+ERASE, 139958922452992, 139958924562431,
+STORE, 139958924550144, 139958924558335,
+STORE, 139958924558336, 139958924562431,
+STORE, 140733860253696, 140733860257791,
+STORE, 140733860241408, 140733860253695,
+STORE, 139958924521472, 139958924550143,
+STORE, 139958924513280, 139958924521471,
+STORE, 139958918512640, 139958922309631,
+SNULL, 139958918512640, 139958920171519,
+STORE, 139958920171520, 139958922309631,
+STORE, 139958918512640, 139958920171519,
+SNULL, 139958922268671, 139958922309631,
+STORE, 139958920171520, 139958922268671,
+STORE, 139958922268672, 139958922309631,
+SNULL, 139958922268672, 139958922293247,
+STORE, 139958922293248, 139958922309631,
+STORE, 139958922268672, 139958922293247,
+ERASE, 139958922268672, 139958922293247,
+STORE, 139958922268672, 139958922293247,
+ERASE, 139958922293248, 139958922309631,
+STORE, 139958922293248, 139958922309631,
+SNULL, 139958922285055, 139958922293247,
+STORE, 139958922268672, 139958922285055,
+STORE, 139958922285056, 139958922293247,
+SNULL, 94484755202047, 94484755206143,
+STORE, 94484755193856, 94484755202047,
+STORE, 94484755202048, 94484755206143,
+SNULL, 139958924554239, 139958924558335,
+STORE, 139958924550144, 139958924554239,
+STORE, 139958924554240, 139958924558335,
+ERASE, 139958924521472, 139958924550143,
+STORE, 94484777615360, 94484777750527,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731051036672, 140737488351231,
+SNULL, 140731051044863, 140737488351231,
+STORE, 140731051036672, 140731051044863,
+STORE, 140731050905600, 140731051044863,
+STORE, 93945822998528, 93945825222655,
+SNULL, 93945823109119, 93945825222655,
+STORE, 93945822998528, 93945823109119,
+STORE, 93945823109120, 93945825222655,
+ERASE, 93945823109120, 93945825222655,
+STORE, 93945825202176, 93945825214463,
+STORE, 93945825214464, 93945825222655,
+STORE, 140153503997952, 140153506250751,
+SNULL, 140153504141311, 140153506250751,
+STORE, 140153503997952, 140153504141311,
+STORE, 140153504141312, 140153506250751,
+ERASE, 140153504141312, 140153506250751,
+STORE, 140153506238464, 140153506246655,
+STORE, 140153506246656, 140153506250751,
+STORE, 140731051331584, 140731051335679,
+STORE, 140731051319296, 140731051331583,
+STORE, 140153506209792, 140153506238463,
+STORE, 140153506201600, 140153506209791,
+STORE, 140153500200960, 140153503997951,
+SNULL, 140153500200960, 140153501859839,
+STORE, 140153501859840, 140153503997951,
+STORE, 140153500200960, 140153501859839,
+SNULL, 140153503956991, 140153503997951,
+STORE, 140153501859840, 140153503956991,
+STORE, 140153503956992, 140153503997951,
+SNULL, 140153503956992, 140153503981567,
+STORE, 140153503981568, 140153503997951,
+STORE, 140153503956992, 140153503981567,
+ERASE, 140153503956992, 140153503981567,
+STORE, 140153503956992, 140153503981567,
+ERASE, 140153503981568, 140153503997951,
+STORE, 140153503981568, 140153503997951,
+SNULL, 140153503973375, 140153503981567,
+STORE, 140153503956992, 140153503973375,
+STORE, 140153503973376, 140153503981567,
+SNULL, 93945825210367, 93945825214463,
+STORE, 93945825202176, 93945825210367,
+STORE, 93945825210368, 93945825214463,
+SNULL, 140153506242559, 140153506246655,
+STORE, 140153506238464, 140153506242559,
+STORE, 140153506242560, 140153506246655,
+ERASE, 140153506209792, 140153506238463,
+STORE, 93945854537728, 93945854672895,
+STORE, 94431504838656, 94431505051647,
+STORE, 94431507148800, 94431507152895,
+STORE, 94431507152896, 94431507161087,
+STORE, 94431507161088, 94431507173375,
+STORE, 94431510286336, 94431537885183,
+STORE, 139818797948928, 139818799607807,
+STORE, 139818799607808, 139818801704959,
+STORE, 139818801704960, 139818801721343,
+STORE, 139818801721344, 139818801729535,
+STORE, 139818801729536, 139818801745919,
+STORE, 139818801745920, 139818801758207,
+STORE, 139818801758208, 139818803851263,
+STORE, 139818803851264, 139818803855359,
+STORE, 139818803855360, 139818803859455,
+STORE, 139818803859456, 139818804002815,
+STORE, 139818804371456, 139818806054911,
+STORE, 139818806054912, 139818806071295,
+STORE, 139818806099968, 139818806104063,
+STORE, 139818806104064, 139818806108159,
+STORE, 139818806108160, 139818806112255,
+STORE, 140731430457344, 140731430596607,
+STORE, 140731431227392, 140731431239679,
+STORE, 140731431239680, 140731431243775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140736025325568, 140737488351231,
+SNULL, 140736025333759, 140737488351231,
+STORE, 140736025325568, 140736025333759,
+STORE, 140736025194496, 140736025333759,
+STORE, 94809095172096, 94809097396223,
+SNULL, 94809095282687, 94809097396223,
+STORE, 94809095172096, 94809095282687,
+STORE, 94809095282688, 94809097396223,
+ERASE, 94809095282688, 94809097396223,
+STORE, 94809097375744, 94809097388031,
+STORE, 94809097388032, 94809097396223,
+STORE, 140194992517120, 140194994769919,
+SNULL, 140194992660479, 140194994769919,
+STORE, 140194992517120, 140194992660479,
+STORE, 140194992660480, 140194994769919,
+ERASE, 140194992660480, 140194994769919,
+STORE, 140194994757632, 140194994765823,
+STORE, 140194994765824, 140194994769919,
+STORE, 140736026173440, 140736026177535,
+STORE, 140736026161152, 140736026173439,
+STORE, 140194994728960, 140194994757631,
+STORE, 140194994720768, 140194994728959,
+STORE, 140194988720128, 140194992517119,
+SNULL, 140194988720128, 140194990379007,
+STORE, 140194990379008, 140194992517119,
+STORE, 140194988720128, 140194990379007,
+SNULL, 140194992476159, 140194992517119,
+STORE, 140194990379008, 140194992476159,
+STORE, 140194992476160, 140194992517119,
+SNULL, 140194992476160, 140194992500735,
+STORE, 140194992500736, 140194992517119,
+STORE, 140194992476160, 140194992500735,
+ERASE, 140194992476160, 140194992500735,
+STORE, 140194992476160, 140194992500735,
+ERASE, 140194992500736, 140194992517119,
+STORE, 140194992500736, 140194992517119,
+SNULL, 140194992492543, 140194992500735,
+STORE, 140194992476160, 140194992492543,
+STORE, 140194992492544, 140194992500735,
+SNULL, 94809097383935, 94809097388031,
+STORE, 94809097375744, 94809097383935,
+STORE, 94809097383936, 94809097388031,
+SNULL, 140194994761727, 140194994765823,
+STORE, 140194994757632, 140194994761727,
+STORE, 140194994761728, 140194994765823,
+ERASE, 140194994728960, 140194994757631,
+STORE, 94809124286464, 94809124421631,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140726342660096, 140737488351231,
+SNULL, 140726342668287, 140737488351231,
+STORE, 140726342660096, 140726342668287,
+STORE, 140726342529024, 140726342668287,
+STORE, 94140331462656, 94140333686783,
+SNULL, 94140331573247, 94140333686783,
+STORE, 94140331462656, 94140331573247,
+STORE, 94140331573248, 94140333686783,
+ERASE, 94140331573248, 94140333686783,
+STORE, 94140333666304, 94140333678591,
+STORE, 94140333678592, 94140333686783,
+STORE, 140714077208576, 140714079461375,
+SNULL, 140714077351935, 140714079461375,
+STORE, 140714077208576, 140714077351935,
+STORE, 140714077351936, 140714079461375,
+ERASE, 140714077351936, 140714079461375,
+STORE, 140714079449088, 140714079457279,
+STORE, 140714079457280, 140714079461375,
+STORE, 140726343933952, 140726343938047,
+STORE, 140726343921664, 140726343933951,
+STORE, 140714079420416, 140714079449087,
+STORE, 140714079412224, 140714079420415,
+STORE, 140714073411584, 140714077208575,
+SNULL, 140714073411584, 140714075070463,
+STORE, 140714075070464, 140714077208575,
+STORE, 140714073411584, 140714075070463,
+SNULL, 140714077167615, 140714077208575,
+STORE, 140714075070464, 140714077167615,
+STORE, 140714077167616, 140714077208575,
+SNULL, 140714077167616, 140714077192191,
+STORE, 140714077192192, 140714077208575,
+STORE, 140714077167616, 140714077192191,
+ERASE, 140714077167616, 140714077192191,
+STORE, 140714077167616, 140714077192191,
+ERASE, 140714077192192, 140714077208575,
+STORE, 140714077192192, 140714077208575,
+SNULL, 140714077183999, 140714077192191,
+STORE, 140714077167616, 140714077183999,
+STORE, 140714077184000, 140714077192191,
+SNULL, 94140333674495, 94140333678591,
+STORE, 94140333666304, 94140333674495,
+STORE, 94140333674496, 94140333678591,
+SNULL, 140714079453183, 140714079457279,
+STORE, 140714079449088, 140714079453183,
+STORE, 140714079453184, 140714079457279,
+ERASE, 140714079420416, 140714079449087,
+STORE, 94140341432320, 94140341567487,
+STORE, 94431504838656, 94431505051647,
+STORE, 94431507148800, 94431507152895,
+STORE, 94431507152896, 94431507161087,
+STORE, 94431507161088, 94431507173375,
+STORE, 94431510286336, 94431539601407,
+STORE, 139818797948928, 139818799607807,
+STORE, 139818799607808, 139818801704959,
+STORE, 139818801704960, 139818801721343,
+STORE, 139818801721344, 139818801729535,
+STORE, 139818801729536, 139818801745919,
+STORE, 139818801745920, 139818801758207,
+STORE, 139818801758208, 139818803851263,
+STORE, 139818803851264, 139818803855359,
+STORE, 139818803855360, 139818803859455,
+STORE, 139818803859456, 139818804002815,
+STORE, 139818804371456, 139818806054911,
+STORE, 139818806054912, 139818806071295,
+STORE, 139818806099968, 139818806104063,
+STORE, 139818806104064, 139818806108159,
+STORE, 139818806108160, 139818806112255,
+STORE, 140731430457344, 140731430596607,
+STORE, 140731431227392, 140731431239679,
+STORE, 140731431239680, 140731431243775,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140725843607552, 140737488351231,
+SNULL, 140725843615743, 140737488351231,
+STORE, 140725843607552, 140725843615743,
+STORE, 140725843476480, 140725843615743,
+STORE, 94889043505152, 94889045839871,
+SNULL, 94889043718143, 94889045839871,
+STORE, 94889043505152, 94889043718143,
+STORE, 94889043718144, 94889045839871,
+ERASE, 94889043718144, 94889045839871,
+STORE, 94889045815296, 94889045827583,
+STORE, 94889045827584, 94889045839871,
+STORE, 140250965946368, 140250968199167,
+SNULL, 140250966089727, 140250968199167,
+STORE, 140250965946368, 140250966089727,
+STORE, 140250966089728, 140250968199167,
+ERASE, 140250966089728, 140250968199167,
+STORE, 140250968186880, 140250968195071,
+STORE, 140250968195072, 140250968199167,
+STORE, 140725844500480, 140725844504575,
+STORE, 140725844488192, 140725844500479,
+STORE, 140250968158208, 140250968186879,
+STORE, 140250968150016, 140250968158207,
+STORE, 140250963832832, 140250965946367,
+SNULL, 140250963832832, 140250963845119,
+STORE, 140250963845120, 140250965946367,
+STORE, 140250963832832, 140250963845119,
+SNULL, 140250965938175, 140250965946367,
+STORE, 140250963845120, 140250965938175,
+STORE, 140250965938176, 140250965946367,
+ERASE, 140250965938176, 140250965946367,
+STORE, 140250965938176, 140250965946367,
+STORE, 140250960035840, 140250963832831,
+SNULL, 140250960035840, 140250961694719,
+STORE, 140250961694720, 140250963832831,
+STORE, 140250960035840, 140250961694719,
+SNULL, 140250963791871, 140250963832831,
+STORE, 140250961694720, 140250963791871,
+STORE, 140250963791872, 140250963832831,
+SNULL, 140250963791872, 140250963816447,
+STORE, 140250963816448, 140250963832831,
+STORE, 140250963791872, 140250963816447,
+ERASE, 140250963791872, 140250963816447,
+STORE, 140250963791872, 140250963816447,
+ERASE, 140250963816448, 140250963832831,
+STORE, 140250963816448, 140250963832831,
+STORE, 140250968141824, 140250968158207,
+SNULL, 140250963808255, 140250963816447,
+STORE, 140250963791872, 140250963808255,
+STORE, 140250963808256, 140250963816447,
+SNULL, 140250965942271, 140250965946367,
+STORE, 140250965938176, 140250965942271,
+STORE, 140250965942272, 140250965946367,
+SNULL, 94889045819391, 94889045827583,
+STORE, 94889045815296, 94889045819391,
+STORE, 94889045819392, 94889045827583,
+SNULL, 140250968190975, 140250968195071,
+STORE, 140250968186880, 140250968190975,
+STORE, 140250968190976, 140250968195071,
+ERASE, 140250968158208, 140250968186879,
+STORE, 94889052213248, 94889052348415,
+STORE, 140250966458368, 140250968141823,
+STORE, 94889052213248, 94889052483583,
+STORE, 94889052213248, 94889052618751,
+STORE, 94170851819520, 94170852032511,
+STORE, 94170854129664, 94170854133759,
+STORE, 94170854133760, 94170854141951,
+STORE, 94170854141952, 94170854154239,
+STORE, 94170866515968, 94170867740671,
+STORE, 140062030422016, 140062032080895,
+STORE, 140062032080896, 140062034178047,
+STORE, 140062034178048, 140062034194431,
+STORE, 140062034194432, 140062034202623,
+STORE, 140062034202624, 140062034219007,
+STORE, 140062034219008, 140062034231295,
+STORE, 140062034231296, 140062036324351,
+STORE, 140062036324352, 140062036328447,
+STORE, 140062036328448, 140062036332543,
+STORE, 140062036332544, 140062036475903,
+STORE, 140062036844544, 140062038527999,
+STORE, 140062038528000, 140062038544383,
+STORE, 140062038573056, 140062038577151,
+STORE, 140062038577152, 140062038581247,
+STORE, 140062038581248, 140062038585343,
+STORE, 140736210550784, 140736210690047,
+STORE, 140736210759680, 140736210771967,
+STORE, 140736210771968, 140736210776063,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724272365568, 140737488351231,
+SNULL, 140724272373759, 140737488351231,
+STORE, 140724272365568, 140724272373759,
+STORE, 140724272234496, 140724272373759,
+STORE, 94607711965184, 94607714189311,
+SNULL, 94607712075775, 94607714189311,
+STORE, 94607711965184, 94607712075775,
+STORE, 94607712075776, 94607714189311,
+ERASE, 94607712075776, 94607714189311,
+STORE, 94607714168832, 94607714181119,
+STORE, 94607714181120, 94607714189311,
+STORE, 140054949253120, 140054951505919,
+SNULL, 140054949396479, 140054951505919,
+STORE, 140054949253120, 140054949396479,
+STORE, 140054949396480, 140054951505919,
+ERASE, 140054949396480, 140054951505919,
+STORE, 140054951493632, 140054951501823,
+STORE, 140054951501824, 140054951505919,
+STORE, 140724272992256, 140724272996351,
+STORE, 140724272979968, 140724272992255,
+STORE, 140054951464960, 140054951493631,
+STORE, 140054951456768, 140054951464959,
+STORE, 140054945456128, 140054949253119,
+SNULL, 140054945456128, 140054947115007,
+STORE, 140054947115008, 140054949253119,
+STORE, 140054945456128, 140054947115007,
+SNULL, 140054949212159, 140054949253119,
+STORE, 140054947115008, 140054949212159,
+STORE, 140054949212160, 140054949253119,
+SNULL, 140054949212160, 140054949236735,
+STORE, 140054949236736, 140054949253119,
+STORE, 140054949212160, 140054949236735,
+ERASE, 140054949212160, 140054949236735,
+STORE, 140054949212160, 140054949236735,
+ERASE, 140054949236736, 140054949253119,
+STORE, 140054949236736, 140054949253119,
+SNULL, 140054949228543, 140054949236735,
+STORE, 140054949212160, 140054949228543,
+STORE, 140054949228544, 140054949236735,
+SNULL, 94607714177023, 94607714181119,
+STORE, 94607714168832, 94607714177023,
+STORE, 94607714177024, 94607714181119,
+SNULL, 140054951497727, 140054951501823,
+STORE, 140054951493632, 140054951497727,
+STORE, 140054951497728, 140054951501823,
+ERASE, 140054951464960, 140054951493631,
+STORE, 94607733374976, 94607733510143,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733586923520, 140737488351231,
+SNULL, 140733586931711, 140737488351231,
+STORE, 140733586923520, 140733586931711,
+STORE, 140733586792448, 140733586931711,
+STORE, 93901634904064, 93901637128191,
+SNULL, 93901635014655, 93901637128191,
+STORE, 93901634904064, 93901635014655,
+STORE, 93901635014656, 93901637128191,
+ERASE, 93901635014656, 93901637128191,
+STORE, 93901637107712, 93901637119999,
+STORE, 93901637120000, 93901637128191,
+STORE, 140086104784896, 140086107037695,
+SNULL, 140086104928255, 140086107037695,
+STORE, 140086104784896, 140086104928255,
+STORE, 140086104928256, 140086107037695,
+ERASE, 140086104928256, 140086107037695,
+STORE, 140086107025408, 140086107033599,
+STORE, 140086107033600, 140086107037695,
+STORE, 140733587263488, 140733587267583,
+STORE, 140733587251200, 140733587263487,
+STORE, 140086106996736, 140086107025407,
+STORE, 140086106988544, 140086106996735,
+STORE, 140086100987904, 140086104784895,
+SNULL, 140086100987904, 140086102646783,
+STORE, 140086102646784, 140086104784895,
+STORE, 140086100987904, 140086102646783,
+SNULL, 140086104743935, 140086104784895,
+STORE, 140086102646784, 140086104743935,
+STORE, 140086104743936, 140086104784895,
+SNULL, 140086104743936, 140086104768511,
+STORE, 140086104768512, 140086104784895,
+STORE, 140086104743936, 140086104768511,
+ERASE, 140086104743936, 140086104768511,
+STORE, 140086104743936, 140086104768511,
+ERASE, 140086104768512, 140086104784895,
+STORE, 140086104768512, 140086104784895,
+SNULL, 140086104760319, 140086104768511,
+STORE, 140086104743936, 140086104760319,
+STORE, 140086104760320, 140086104768511,
+SNULL, 93901637115903, 93901637119999,
+STORE, 93901637107712, 93901637115903,
+STORE, 93901637115904, 93901637119999,
+SNULL, 140086107029503, 140086107033599,
+STORE, 140086107025408, 140086107029503,
+STORE, 140086107029504, 140086107033599,
+ERASE, 140086106996736, 140086107025407,
+STORE, 93901662715904, 93901662851071,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723365613568, 140737488351231,
+SNULL, 140723365621759, 140737488351231,
+STORE, 140723365613568, 140723365621759,
+STORE, 140723365482496, 140723365621759,
+STORE, 94759193546752, 94759195770879,
+SNULL, 94759193657343, 94759195770879,
+STORE, 94759193546752, 94759193657343,
+STORE, 94759193657344, 94759195770879,
+ERASE, 94759193657344, 94759195770879,
+STORE, 94759195750400, 94759195762687,
+STORE, 94759195762688, 94759195770879,
+STORE, 140607636246528, 140607638499327,
+SNULL, 140607636389887, 140607638499327,
+STORE, 140607636246528, 140607636389887,
+STORE, 140607636389888, 140607638499327,
+ERASE, 140607636389888, 140607638499327,
+STORE, 140607638487040, 140607638495231,
+STORE, 140607638495232, 140607638499327,
+STORE, 140723365900288, 140723365904383,
+STORE, 140723365888000, 140723365900287,
+STORE, 140607638458368, 140607638487039,
+STORE, 140607638450176, 140607638458367,
+STORE, 140607632449536, 140607636246527,
+SNULL, 140607632449536, 140607634108415,
+STORE, 140607634108416, 140607636246527,
+STORE, 140607632449536, 140607634108415,
+SNULL, 140607636205567, 140607636246527,
+STORE, 140607634108416, 140607636205567,
+STORE, 140607636205568, 140607636246527,
+SNULL, 140607636205568, 140607636230143,
+STORE, 140607636230144, 140607636246527,
+STORE, 140607636205568, 140607636230143,
+ERASE, 140607636205568, 140607636230143,
+STORE, 140607636205568, 140607636230143,
+ERASE, 140607636230144, 140607636246527,
+STORE, 140607636230144, 140607636246527,
+SNULL, 140607636221951, 140607636230143,
+STORE, 140607636205568, 140607636221951,
+STORE, 140607636221952, 140607636230143,
+SNULL, 94759195758591, 94759195762687,
+STORE, 94759195750400, 94759195758591,
+STORE, 94759195758592, 94759195762687,
+SNULL, 140607638491135, 140607638495231,
+STORE, 140607638487040, 140607638491135,
+STORE, 140607638491136, 140607638495231,
+ERASE, 140607638458368, 140607638487039,
+STORE, 94759204995072, 94759205130239,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140732503789568, 140737488351231,
+SNULL, 140732503797759, 140737488351231,
+STORE, 140732503789568, 140732503797759,
+STORE, 140732503658496, 140732503797759,
+STORE, 94077792956416, 94077795180543,
+SNULL, 94077793067007, 94077795180543,
+STORE, 94077792956416, 94077793067007,
+STORE, 94077793067008, 94077795180543,
+ERASE, 94077793067008, 94077795180543,
+STORE, 94077795160064, 94077795172351,
+STORE, 94077795172352, 94077795180543,
+STORE, 140359874252800, 140359876505599,
+SNULL, 140359874396159, 140359876505599,
+STORE, 140359874252800, 140359874396159,
+STORE, 140359874396160, 140359876505599,
+ERASE, 140359874396160, 140359876505599,
+STORE, 140359876493312, 140359876501503,
+STORE, 140359876501504, 140359876505599,
+STORE, 140732504465408, 140732504469503,
+STORE, 140732504453120, 140732504465407,
+STORE, 140359876464640, 140359876493311,
+STORE, 140359876456448, 140359876464639,
+STORE, 140359870455808, 140359874252799,
+SNULL, 140359870455808, 140359872114687,
+STORE, 140359872114688, 140359874252799,
+STORE, 140359870455808, 140359872114687,
+SNULL, 140359874211839, 140359874252799,
+STORE, 140359872114688, 140359874211839,
+STORE, 140359874211840, 140359874252799,
+SNULL, 140359874211840, 140359874236415,
+STORE, 140359874236416, 140359874252799,
+STORE, 140359874211840, 140359874236415,
+ERASE, 140359874211840, 140359874236415,
+STORE, 140359874211840, 140359874236415,
+ERASE, 140359874236416, 140359874252799,
+STORE, 140359874236416, 140359874252799,
+SNULL, 140359874228223, 140359874236415,
+STORE, 140359874211840, 140359874228223,
+STORE, 140359874228224, 140359874236415,
+SNULL, 94077795168255, 94077795172351,
+STORE, 94077795160064, 94077795168255,
+STORE, 94077795168256, 94077795172351,
+SNULL, 140359876497407, 140359876501503,
+STORE, 140359876493312, 140359876497407,
+STORE, 140359876497408, 140359876501503,
+ERASE, 140359876464640, 140359876493311,
+STORE, 94077808717824, 94077808852991,
+STORE, 94549486252032, 94549486465023,
+STORE, 94549488562176, 94549488566271,
+STORE, 94549488566272, 94549488574463,
+STORE, 94549488574464, 94549488586751,
+STORE, 94549503492096, 94549506121727,
+STORE, 140085800894464, 140085802553343,
+STORE, 140085802553344, 140085804650495,
+STORE, 140085804650496, 140085804666879,
+STORE, 140085804666880, 140085804675071,
+STORE, 140085804675072, 140085804691455,
+STORE, 140085804691456, 140085804703743,
+STORE, 140085804703744, 140085806796799,
+STORE, 140085806796800, 140085806800895,
+STORE, 140085806800896, 140085806804991,
+STORE, 140085806804992, 140085806948351,
+STORE, 140085807316992, 140085809000447,
+STORE, 140085809000448, 140085809016831,
+STORE, 140085809045504, 140085809049599,
+STORE, 140085809049600, 140085809053695,
+STORE, 140085809053696, 140085809057791,
+STORE, 140731810545664, 140731810684927,
+STORE, 140731810967552, 140731810979839,
+STORE, 140731810979840, 140731810983935,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724752330752, 140737488351231,
+SNULL, 140724752338943, 140737488351231,
+STORE, 140724752330752, 140724752338943,
+STORE, 140724752199680, 140724752338943,
+STORE, 94656357539840, 94656359874559,
+SNULL, 94656357752831, 94656359874559,
+STORE, 94656357539840, 94656357752831,
+STORE, 94656357752832, 94656359874559,
+ERASE, 94656357752832, 94656359874559,
+STORE, 94656359849984, 94656359862271,
+STORE, 94656359862272, 94656359874559,
+STORE, 139632585203712, 139632587456511,
+SNULL, 139632585347071, 139632587456511,
+STORE, 139632585203712, 139632585347071,
+STORE, 139632585347072, 139632587456511,
+ERASE, 139632585347072, 139632587456511,
+STORE, 139632587444224, 139632587452415,
+STORE, 139632587452416, 139632587456511,
+STORE, 139632587440128, 139632587444223,
+STORE, 139632587427840, 139632587440127,
+STORE, 139632587399168, 139632587427839,
+STORE, 139632587390976, 139632587399167,
+STORE, 139632583090176, 139632585203711,
+SNULL, 139632583090176, 139632583102463,
+STORE, 139632583102464, 139632585203711,
+STORE, 139632583090176, 139632583102463,
+SNULL, 139632585195519, 139632585203711,
+STORE, 139632583102464, 139632585195519,
+STORE, 139632585195520, 139632585203711,
+ERASE, 139632585195520, 139632585203711,
+STORE, 139632585195520, 139632585203711,
+STORE, 139632579293184, 139632583090175,
+SNULL, 139632579293184, 139632580952063,
+STORE, 139632580952064, 139632583090175,
+STORE, 139632579293184, 139632580952063,
+SNULL, 139632583049215, 139632583090175,
+STORE, 139632580952064, 139632583049215,
+STORE, 139632583049216, 139632583090175,
+SNULL, 139632583049216, 139632583073791,
+STORE, 139632583073792, 139632583090175,
+STORE, 139632583049216, 139632583073791,
+ERASE, 139632583049216, 139632583073791,
+STORE, 139632583049216, 139632583073791,
+ERASE, 139632583073792, 139632583090175,
+STORE, 139632583073792, 139632583090175,
+STORE, 139632587382784, 139632587399167,
+SNULL, 139632583065599, 139632583073791,
+STORE, 139632583049216, 139632583065599,
+STORE, 139632583065600, 139632583073791,
+SNULL, 139632585199615, 139632585203711,
+STORE, 139632585195520, 139632585199615,
+STORE, 139632585199616, 139632585203711,
+SNULL, 94656359854079, 94656359862271,
+STORE, 94656359849984, 94656359854079,
+STORE, 94656359854080, 94656359862271,
+SNULL, 139632587448319, 139632587452415,
+STORE, 139632587444224, 139632587448319,
+STORE, 139632587448320, 139632587452415,
+ERASE, 139632587399168, 139632587427839,
+STORE, 94656378912768, 94656379047935,
+STORE, 139632585699328, 139632587382783,
+STORE, 94656378912768, 94656379183103,
+STORE, 94656378912768, 94656379318271,
+STORE, 94656378912768, 94656379494399,
+SNULL, 94656379469823, 94656379494399,
+STORE, 94656378912768, 94656379469823,
+STORE, 94656379469824, 94656379494399,
+ERASE, 94656379469824, 94656379494399,
+STORE, 94656378912768, 94656379621375,
+STORE, 94656378912768, 94656379756543,
+STORE, 94656378912768, 94656379912191,
+STORE, 94656378912768, 94656380055551,
+STORE, 94656378912768, 94656380190719,
+STORE, 94656378912768, 94656380338175,
+SNULL, 94656380313599, 94656380338175,
+STORE, 94656378912768, 94656380313599,
+STORE, 94656380313600, 94656380338175,
+ERASE, 94656380313600, 94656380338175,
+STORE, 94656378912768, 94656380448767,
+SNULL, 94656380432383, 94656380448767,
+STORE, 94656378912768, 94656380432383,
+STORE, 94656380432384, 94656380448767,
+ERASE, 94656380432384, 94656380448767,
+STORE, 94656378912768, 94656380567551,
+STORE, 94656378912768, 94656380719103,
+STORE, 94656378912768, 94656380858367,
+STORE, 94656378912768, 94656380997631,
+STORE, 94656378912768, 94656381132799,
+SNULL, 94656381124607, 94656381132799,
+STORE, 94656378912768, 94656381124607,
+STORE, 94656381124608, 94656381132799,
+ERASE, 94656381124608, 94656381132799,
+STORE, 94656378912768, 94656381276159,
+STORE, 94656378912768, 94656381427711,
+STORE, 94604087611392, 94604087824383,
+STORE, 94604089921536, 94604089925631,
+STORE, 94604089925632, 94604089933823,
+STORE, 94604089933824, 94604089946111,
+STORE, 94604105125888, 94604106424319,
+STORE, 140454937694208, 140454939353087,
+STORE, 140454939353088, 140454941450239,
+STORE, 140454941450240, 140454941466623,
+STORE, 140454941466624, 140454941474815,
+STORE, 140454941474816, 140454941491199,
+STORE, 140454941491200, 140454941503487,
+STORE, 140454941503488, 140454943596543,
+STORE, 140454943596544, 140454943600639,
+STORE, 140454943600640, 140454943604735,
+STORE, 140454943604736, 140454943748095,
+STORE, 140454944116736, 140454945800191,
+STORE, 140454945800192, 140454945816575,
+STORE, 140454945845248, 140454945849343,
+STORE, 140454945849344, 140454945853439,
+STORE, 140454945853440, 140454945857535,
+STORE, 140728438214656, 140728438353919,
+STORE, 140728439095296, 140728439107583,
+STORE, 140728439107584, 140728439111679,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140727821099008, 140737488351231,
+SNULL, 140727821107199, 140737488351231,
+STORE, 140727821099008, 140727821107199,
+STORE, 140727820967936, 140727821107199,
+STORE, 94088457240576, 94088459575295,
+SNULL, 94088457453567, 94088459575295,
+STORE, 94088457240576, 94088457453567,
+STORE, 94088457453568, 94088459575295,
+ERASE, 94088457453568, 94088459575295,
+STORE, 94088459550720, 94088459563007,
+STORE, 94088459563008, 94088459575295,
+STORE, 140234378989568, 140234381242367,
+SNULL, 140234379132927, 140234381242367,
+STORE, 140234378989568, 140234379132927,
+STORE, 140234379132928, 140234381242367,
+ERASE, 140234379132928, 140234381242367,
+STORE, 140234381230080, 140234381238271,
+STORE, 140234381238272, 140234381242367,
+STORE, 140727822077952, 140727822082047,
+STORE, 140727822065664, 140727822077951,
+STORE, 140234381201408, 140234381230079,
+STORE, 140234381193216, 140234381201407,
+STORE, 140234376876032, 140234378989567,
+SNULL, 140234376876032, 140234376888319,
+STORE, 140234376888320, 140234378989567,
+STORE, 140234376876032, 140234376888319,
+SNULL, 140234378981375, 140234378989567,
+STORE, 140234376888320, 140234378981375,
+STORE, 140234378981376, 140234378989567,
+ERASE, 140234378981376, 140234378989567,
+STORE, 140234378981376, 140234378989567,
+STORE, 140234373079040, 140234376876031,
+SNULL, 140234373079040, 140234374737919,
+STORE, 140234374737920, 140234376876031,
+STORE, 140234373079040, 140234374737919,
+SNULL, 140234376835071, 140234376876031,
+STORE, 140234374737920, 140234376835071,
+STORE, 140234376835072, 140234376876031,
+SNULL, 140234376835072, 140234376859647,
+STORE, 140234376859648, 140234376876031,
+STORE, 140234376835072, 140234376859647,
+ERASE, 140234376835072, 140234376859647,
+STORE, 140234376835072, 140234376859647,
+ERASE, 140234376859648, 140234376876031,
+STORE, 140234376859648, 140234376876031,
+STORE, 140234381185024, 140234381201407,
+SNULL, 140234376851455, 140234376859647,
+STORE, 140234376835072, 140234376851455,
+STORE, 140234376851456, 140234376859647,
+SNULL, 140234378985471, 140234378989567,
+STORE, 140234378981376, 140234378985471,
+STORE, 140234378985472, 140234378989567,
+SNULL, 94088459554815, 94088459563007,
+STORE, 94088459550720, 94088459554815,
+STORE, 94088459554816, 94088459563007,
+SNULL, 140234381234175, 140234381238271,
+STORE, 140234381230080, 140234381234175,
+STORE, 140234381234176, 140234381238271,
+ERASE, 140234381201408, 140234381230079,
+STORE, 94088468852736, 94088468987903,
+STORE, 140234379501568, 140234381185023,
+STORE, 94088468852736, 94088469123071,
+STORE, 94088468852736, 94088469258239,
+STORE, 94110050402304, 94110050615295,
+STORE, 94110052712448, 94110052716543,
+STORE, 94110052716544, 94110052724735,
+STORE, 94110052724736, 94110052737023,
+STORE, 94110061875200, 94110062415871,
+STORE, 140139439357952, 140139441016831,
+STORE, 140139441016832, 140139443113983,
+STORE, 140139443113984, 140139443130367,
+STORE, 140139443130368, 140139443138559,
+STORE, 140139443138560, 140139443154943,
+STORE, 140139443154944, 140139443167231,
+STORE, 140139443167232, 140139445260287,
+STORE, 140139445260288, 140139445264383,
+STORE, 140139445264384, 140139445268479,
+STORE, 140139445268480, 140139445411839,
+STORE, 140139445780480, 140139447463935,
+STORE, 140139447463936, 140139447480319,
+STORE, 140139447508992, 140139447513087,
+STORE, 140139447513088, 140139447517183,
+STORE, 140139447517184, 140139447521279,
+STORE, 140731901427712, 140731901566975,
+STORE, 140731902259200, 140731902271487,
+STORE, 140731902271488, 140731902275583,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140727282622464, 140737488351231,
+SNULL, 140727282630655, 140737488351231,
+STORE, 140727282622464, 140727282630655,
+STORE, 140727282491392, 140727282630655,
+STORE, 94266649866240, 94266652200959,
+SNULL, 94266650079231, 94266652200959,
+STORE, 94266649866240, 94266650079231,
+STORE, 94266650079232, 94266652200959,
+ERASE, 94266650079232, 94266652200959,
+STORE, 94266652176384, 94266652188671,
+STORE, 94266652188672, 94266652200959,
+STORE, 139888497991680, 139888500244479,
+SNULL, 139888498135039, 139888500244479,
+STORE, 139888497991680, 139888498135039,
+STORE, 139888498135040, 139888500244479,
+ERASE, 139888498135040, 139888500244479,
+STORE, 139888500232192, 139888500240383,
+STORE, 139888500240384, 139888500244479,
+STORE, 140727283113984, 140727283118079,
+STORE, 140727283101696, 140727283113983,
+STORE, 139888500203520, 139888500232191,
+STORE, 139888500195328, 139888500203519,
+STORE, 139888495878144, 139888497991679,
+SNULL, 139888495878144, 139888495890431,
+STORE, 139888495890432, 139888497991679,
+STORE, 139888495878144, 139888495890431,
+SNULL, 139888497983487, 139888497991679,
+STORE, 139888495890432, 139888497983487,
+STORE, 139888497983488, 139888497991679,
+ERASE, 139888497983488, 139888497991679,
+STORE, 139888497983488, 139888497991679,
+STORE, 139888492081152, 139888495878143,
+SNULL, 139888492081152, 139888493740031,
+STORE, 139888493740032, 139888495878143,
+STORE, 139888492081152, 139888493740031,
+SNULL, 139888495837183, 139888495878143,
+STORE, 139888493740032, 139888495837183,
+STORE, 139888495837184, 139888495878143,
+SNULL, 139888495837184, 139888495861759,
+STORE, 139888495861760, 139888495878143,
+STORE, 139888495837184, 139888495861759,
+ERASE, 139888495837184, 139888495861759,
+STORE, 139888495837184, 139888495861759,
+ERASE, 139888495861760, 139888495878143,
+STORE, 139888495861760, 139888495878143,
+STORE, 139888500187136, 139888500203519,
+SNULL, 139888495853567, 139888495861759,
+STORE, 139888495837184, 139888495853567,
+STORE, 139888495853568, 139888495861759,
+SNULL, 139888497987583, 139888497991679,
+STORE, 139888497983488, 139888497987583,
+STORE, 139888497987584, 139888497991679,
+SNULL, 94266652180479, 94266652188671,
+STORE, 94266652176384, 94266652180479,
+STORE, 94266652180480, 94266652188671,
+SNULL, 139888500236287, 139888500240383,
+STORE, 139888500232192, 139888500236287,
+STORE, 139888500236288, 139888500240383,
+ERASE, 139888500203520, 139888500232191,
+STORE, 94266678542336, 94266678677503,
+STORE, 139888498503680, 139888500187135,
+STORE, 94266678542336, 94266678812671,
+STORE, 94266678542336, 94266678947839,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722507702272, 140737488351231,
+SNULL, 140722507710463, 140737488351231,
+STORE, 140722507702272, 140722507710463,
+STORE, 140722507571200, 140722507710463,
+STORE, 94313981394944, 94313983729663,
+SNULL, 94313981607935, 94313983729663,
+STORE, 94313981394944, 94313981607935,
+STORE, 94313981607936, 94313983729663,
+ERASE, 94313981607936, 94313983729663,
+STORE, 94313983705088, 94313983717375,
+STORE, 94313983717376, 94313983729663,
+STORE, 140456286076928, 140456288329727,
+SNULL, 140456286220287, 140456288329727,
+STORE, 140456286076928, 140456286220287,
+STORE, 140456286220288, 140456288329727,
+ERASE, 140456286220288, 140456288329727,
+STORE, 140456288317440, 140456288325631,
+STORE, 140456288325632, 140456288329727,
+STORE, 140722507997184, 140722508001279,
+STORE, 140722507984896, 140722507997183,
+STORE, 140456288288768, 140456288317439,
+STORE, 140456288280576, 140456288288767,
+STORE, 140456283963392, 140456286076927,
+SNULL, 140456283963392, 140456283975679,
+STORE, 140456283975680, 140456286076927,
+STORE, 140456283963392, 140456283975679,
+SNULL, 140456286068735, 140456286076927,
+STORE, 140456283975680, 140456286068735,
+STORE, 140456286068736, 140456286076927,
+ERASE, 140456286068736, 140456286076927,
+STORE, 140456286068736, 140456286076927,
+STORE, 140456280166400, 140456283963391,
+SNULL, 140456280166400, 140456281825279,
+STORE, 140456281825280, 140456283963391,
+STORE, 140456280166400, 140456281825279,
+SNULL, 140456283922431, 140456283963391,
+STORE, 140456281825280, 140456283922431,
+STORE, 140456283922432, 140456283963391,
+SNULL, 140456283922432, 140456283947007,
+STORE, 140456283947008, 140456283963391,
+STORE, 140456283922432, 140456283947007,
+ERASE, 140456283922432, 140456283947007,
+STORE, 140456283922432, 140456283947007,
+ERASE, 140456283947008, 140456283963391,
+STORE, 140456283947008, 140456283963391,
+STORE, 140456288272384, 140456288288767,
+SNULL, 140456283938815, 140456283947007,
+STORE, 140456283922432, 140456283938815,
+STORE, 140456283938816, 140456283947007,
+SNULL, 140456286072831, 140456286076927,
+STORE, 140456286068736, 140456286072831,
+STORE, 140456286072832, 140456286076927,
+SNULL, 94313983709183, 94313983717375,
+STORE, 94313983705088, 94313983709183,
+STORE, 94313983709184, 94313983717375,
+SNULL, 140456288321535, 140456288325631,
+STORE, 140456288317440, 140456288321535,
+STORE, 140456288321536, 140456288325631,
+ERASE, 140456288288768, 140456288317439,
+STORE, 94314006716416, 94314006851583,
+STORE, 140456286588928, 140456288272383,
+STORE, 94314006716416, 94314006986751,
+STORE, 94314006716416, 94314007121919,
+STORE, 93948644454400, 93948644667391,
+STORE, 93948646764544, 93948646768639,
+STORE, 93948646768640, 93948646776831,
+STORE, 93948646776832, 93948646789119,
+STORE, 93948664999936, 93948667142143,
+STORE, 140187350659072, 140187352317951,
+STORE, 140187352317952, 140187354415103,
+STORE, 140187354415104, 140187354431487,
+STORE, 140187354431488, 140187354439679,
+STORE, 140187354439680, 140187354456063,
+STORE, 140187354456064, 140187354468351,
+STORE, 140187354468352, 140187356561407,
+STORE, 140187356561408, 140187356565503,
+STORE, 140187356565504, 140187356569599,
+STORE, 140187356569600, 140187356712959,
+STORE, 140187357081600, 140187358765055,
+STORE, 140187358765056, 140187358781439,
+STORE, 140187358810112, 140187358814207,
+STORE, 140187358814208, 140187358818303,
+STORE, 140187358818304, 140187358822399,
+STORE, 140730484518912, 140730484658175,
+STORE, 140730485690368, 140730485702655,
+STORE, 140730485702656, 140730485706751,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140721211551744, 140737488351231,
+SNULL, 140721211559935, 140737488351231,
+STORE, 140721211551744, 140721211559935,
+STORE, 140721211420672, 140721211559935,
+STORE, 94105221423104, 94105223757823,
+SNULL, 94105221636095, 94105223757823,
+STORE, 94105221423104, 94105221636095,
+STORE, 94105221636096, 94105223757823,
+ERASE, 94105221636096, 94105223757823,
+STORE, 94105223733248, 94105223745535,
+STORE, 94105223745536, 94105223757823,
+STORE, 140474453676032, 140474455928831,
+SNULL, 140474453819391, 140474455928831,
+STORE, 140474453676032, 140474453819391,
+STORE, 140474453819392, 140474455928831,
+ERASE, 140474453819392, 140474455928831,
+STORE, 140474455916544, 140474455924735,
+STORE, 140474455924736, 140474455928831,
+STORE, 140721211703296, 140721211707391,
+STORE, 140721211691008, 140721211703295,
+STORE, 140474455887872, 140474455916543,
+STORE, 140474455879680, 140474455887871,
+STORE, 140474451562496, 140474453676031,
+SNULL, 140474451562496, 140474451574783,
+STORE, 140474451574784, 140474453676031,
+STORE, 140474451562496, 140474451574783,
+SNULL, 140474453667839, 140474453676031,
+STORE, 140474451574784, 140474453667839,
+STORE, 140474453667840, 140474453676031,
+ERASE, 140474453667840, 140474453676031,
+STORE, 140474453667840, 140474453676031,
+STORE, 140474447765504, 140474451562495,
+SNULL, 140474447765504, 140474449424383,
+STORE, 140474449424384, 140474451562495,
+STORE, 140474447765504, 140474449424383,
+SNULL, 140474451521535, 140474451562495,
+STORE, 140474449424384, 140474451521535,
+STORE, 140474451521536, 140474451562495,
+SNULL, 140474451521536, 140474451546111,
+STORE, 140474451546112, 140474451562495,
+STORE, 140474451521536, 140474451546111,
+ERASE, 140474451521536, 140474451546111,
+STORE, 140474451521536, 140474451546111,
+ERASE, 140474451546112, 140474451562495,
+STORE, 140474451546112, 140474451562495,
+STORE, 140474455871488, 140474455887871,
+SNULL, 140474451537919, 140474451546111,
+STORE, 140474451521536, 140474451537919,
+STORE, 140474451537920, 140474451546111,
+SNULL, 140474453671935, 140474453676031,
+STORE, 140474453667840, 140474453671935,
+STORE, 140474453671936, 140474453676031,
+SNULL, 94105223737343, 94105223745535,
+STORE, 94105223733248, 94105223737343,
+STORE, 94105223737344, 94105223745535,
+SNULL, 140474455920639, 140474455924735,
+STORE, 140474455916544, 140474455920639,
+STORE, 140474455920640, 140474455924735,
+ERASE, 140474455887872, 140474455916543,
+STORE, 94105238712320, 94105238847487,
+STORE, 140474454188032, 140474455871487,
+STORE, 94105238712320, 94105238982655,
+STORE, 94105238712320, 94105239117823,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140732356354048, 140737488351231,
+SNULL, 140732356362239, 140737488351231,
+STORE, 140732356354048, 140732356362239,
+STORE, 140732356222976, 140732356362239,
+STORE, 94461165989888, 94461168324607,
+SNULL, 94461166202879, 94461168324607,
+STORE, 94461165989888, 94461166202879,
+STORE, 94461166202880, 94461168324607,
+ERASE, 94461166202880, 94461168324607,
+STORE, 94461168300032, 94461168312319,
+STORE, 94461168312320, 94461168324607,
+STORE, 140317255110656, 140317257363455,
+SNULL, 140317255254015, 140317257363455,
+STORE, 140317255110656, 140317255254015,
+STORE, 140317255254016, 140317257363455,
+ERASE, 140317255254016, 140317257363455,
+STORE, 140317257351168, 140317257359359,
+STORE, 140317257359360, 140317257363455,
+STORE, 140732356583424, 140732356587519,
+STORE, 140732356571136, 140732356583423,
+STORE, 140317257322496, 140317257351167,
+STORE, 140317257314304, 140317257322495,
+STORE, 140317252997120, 140317255110655,
+SNULL, 140317252997120, 140317253009407,
+STORE, 140317253009408, 140317255110655,
+STORE, 140317252997120, 140317253009407,
+SNULL, 140317255102463, 140317255110655,
+STORE, 140317253009408, 140317255102463,
+STORE, 140317255102464, 140317255110655,
+ERASE, 140317255102464, 140317255110655,
+STORE, 140317255102464, 140317255110655,
+STORE, 140317249200128, 140317252997119,
+SNULL, 140317249200128, 140317250859007,
+STORE, 140317250859008, 140317252997119,
+STORE, 140317249200128, 140317250859007,
+SNULL, 140317252956159, 140317252997119,
+STORE, 140317250859008, 140317252956159,
+STORE, 140317252956160, 140317252997119,
+SNULL, 140317252956160, 140317252980735,
+STORE, 140317252980736, 140317252997119,
+STORE, 140317252956160, 140317252980735,
+ERASE, 140317252956160, 140317252980735,
+STORE, 140317252956160, 140317252980735,
+ERASE, 140317252980736, 140317252997119,
+STORE, 140317252980736, 140317252997119,
+STORE, 140317257306112, 140317257322495,
+SNULL, 140317252972543, 140317252980735,
+STORE, 140317252956160, 140317252972543,
+STORE, 140317252972544, 140317252980735,
+SNULL, 140317255106559, 140317255110655,
+STORE, 140317255102464, 140317255106559,
+STORE, 140317255106560, 140317255110655,
+SNULL, 94461168304127, 94461168312319,
+STORE, 94461168300032, 94461168304127,
+STORE, 94461168304128, 94461168312319,
+SNULL, 140317257355263, 140317257359359,
+STORE, 140317257351168, 140317257355263,
+STORE, 140317257355264, 140317257359359,
+ERASE, 140317257322496, 140317257351167,
+STORE, 94461195268096, 94461195403263,
+STORE, 140317255622656, 140317257306111,
+STORE, 94461195268096, 94461195538431,
+STORE, 94461195268096, 94461195673599,
+STORE, 94110050402304, 94110050615295,
+STORE, 94110052712448, 94110052716543,
+STORE, 94110052716544, 94110052724735,
+STORE, 94110052724736, 94110052737023,
+STORE, 94110061875200, 94110062415871,
+STORE, 140139439357952, 140139441016831,
+STORE, 140139441016832, 140139443113983,
+STORE, 140139443113984, 140139443130367,
+STORE, 140139443130368, 140139443138559,
+STORE, 140139443138560, 140139443154943,
+STORE, 140139443154944, 140139443167231,
+STORE, 140139443167232, 140139445260287,
+STORE, 140139445260288, 140139445264383,
+STORE, 140139445264384, 140139445268479,
+STORE, 140139445268480, 140139445411839,
+STORE, 140139445780480, 140139447463935,
+STORE, 140139447463936, 140139447480319,
+STORE, 140139447508992, 140139447513087,
+STORE, 140139447513088, 140139447517183,
+STORE, 140139447517184, 140139447521279,
+STORE, 140731901427712, 140731901566975,
+STORE, 140731902259200, 140731902271487,
+STORE, 140731902271488, 140731902275583,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140720941613056, 140737488351231,
+SNULL, 140720941621247, 140737488351231,
+STORE, 140720941613056, 140720941621247,
+STORE, 140720941481984, 140720941621247,
+STORE, 93902377721856, 93902379945983,
+SNULL, 93902377832447, 93902379945983,
+STORE, 93902377721856, 93902377832447,
+STORE, 93902377832448, 93902379945983,
+ERASE, 93902377832448, 93902379945983,
+STORE, 93902379925504, 93902379937791,
+STORE, 93902379937792, 93902379945983,
+STORE, 139836543635456, 139836545888255,
+SNULL, 139836543778815, 139836545888255,
+STORE, 139836543635456, 139836543778815,
+STORE, 139836543778816, 139836545888255,
+ERASE, 139836543778816, 139836545888255,
+STORE, 139836545875968, 139836545884159,
+STORE, 139836545884160, 139836545888255,
+STORE, 140720941711360, 140720941715455,
+STORE, 140720941699072, 140720941711359,
+STORE, 139836545847296, 139836545875967,
+STORE, 139836545839104, 139836545847295,
+STORE, 139836539838464, 139836543635455,
+SNULL, 139836539838464, 139836541497343,
+STORE, 139836541497344, 139836543635455,
+STORE, 139836539838464, 139836541497343,
+SNULL, 139836543594495, 139836543635455,
+STORE, 139836541497344, 139836543594495,
+STORE, 139836543594496, 139836543635455,
+SNULL, 139836543594496, 139836543619071,
+STORE, 139836543619072, 139836543635455,
+STORE, 139836543594496, 139836543619071,
+ERASE, 139836543594496, 139836543619071,
+STORE, 139836543594496, 139836543619071,
+ERASE, 139836543619072, 139836543635455,
+STORE, 139836543619072, 139836543635455,
+SNULL, 139836543610879, 139836543619071,
+STORE, 139836543594496, 139836543610879,
+STORE, 139836543610880, 139836543619071,
+SNULL, 93902379933695, 93902379937791,
+STORE, 93902379925504, 93902379933695,
+STORE, 93902379933696, 93902379937791,
+SNULL, 139836545880063, 139836545884159,
+STORE, 139836545875968, 139836545880063,
+STORE, 139836545880064, 139836545884159,
+ERASE, 139836545847296, 139836545875967,
+STORE, 93902396891136, 93902397026303,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140736538206208, 140737488351231,
+SNULL, 140736538214399, 140737488351231,
+STORE, 140736538206208, 140736538214399,
+STORE, 140736538075136, 140736538214399,
+STORE, 94173471399936, 94173473734655,
+SNULL, 94173471612927, 94173473734655,
+STORE, 94173471399936, 94173471612927,
+STORE, 94173471612928, 94173473734655,
+ERASE, 94173471612928, 94173473734655,
+STORE, 94173473710080, 94173473722367,
+STORE, 94173473722368, 94173473734655,
+STORE, 140035513556992, 140035515809791,
+SNULL, 140035513700351, 140035515809791,
+STORE, 140035513556992, 140035513700351,
+STORE, 140035513700352, 140035515809791,
+ERASE, 140035513700352, 140035515809791,
+STORE, 140035515797504, 140035515805695,
+STORE, 140035515805696, 140035515809791,
+STORE, 140736538329088, 140736538333183,
+STORE, 140736538316800, 140736538329087,
+STORE, 140035515768832, 140035515797503,
+STORE, 140035515760640, 140035515768831,
+STORE, 140035511443456, 140035513556991,
+SNULL, 140035511443456, 140035511455743,
+STORE, 140035511455744, 140035513556991,
+STORE, 140035511443456, 140035511455743,
+SNULL, 140035513548799, 140035513556991,
+STORE, 140035511455744, 140035513548799,
+STORE, 140035513548800, 140035513556991,
+ERASE, 140035513548800, 140035513556991,
+STORE, 140035513548800, 140035513556991,
+STORE, 140035507646464, 140035511443455,
+SNULL, 140035507646464, 140035509305343,
+STORE, 140035509305344, 140035511443455,
+STORE, 140035507646464, 140035509305343,
+SNULL, 140035511402495, 140035511443455,
+STORE, 140035509305344, 140035511402495,
+STORE, 140035511402496, 140035511443455,
+SNULL, 140035511402496, 140035511427071,
+STORE, 140035511427072, 140035511443455,
+STORE, 140035511402496, 140035511427071,
+ERASE, 140035511402496, 140035511427071,
+STORE, 140035511402496, 140035511427071,
+ERASE, 140035511427072, 140035511443455,
+STORE, 140035511427072, 140035511443455,
+STORE, 140035515752448, 140035515768831,
+SNULL, 140035511418879, 140035511427071,
+STORE, 140035511402496, 140035511418879,
+STORE, 140035511418880, 140035511427071,
+SNULL, 140035513552895, 140035513556991,
+STORE, 140035513548800, 140035513552895,
+STORE, 140035513552896, 140035513556991,
+SNULL, 94173473714175, 94173473722367,
+STORE, 94173473710080, 94173473714175,
+STORE, 94173473714176, 94173473722367,
+SNULL, 140035515801599, 140035515805695,
+STORE, 140035515797504, 140035515801599,
+STORE, 140035515801600, 140035515805695,
+ERASE, 140035515768832, 140035515797503,
+STORE, 94173478645760, 94173478780927,
+STORE, 140035514068992, 140035515752447,
+STORE, 94173478645760, 94173478916095,
+STORE, 94173478645760, 94173479051263,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140724216176640, 140737488351231,
+SNULL, 140724216184831, 140737488351231,
+STORE, 140724216176640, 140724216184831,
+STORE, 140724216045568, 140724216184831,
+STORE, 94870930628608, 94870932963327,
+SNULL, 94870930841599, 94870932963327,
+STORE, 94870930628608, 94870930841599,
+STORE, 94870930841600, 94870932963327,
+ERASE, 94870930841600, 94870932963327,
+STORE, 94870932938752, 94870932951039,
+STORE, 94870932951040, 94870932963327,
+STORE, 140453683736576, 140453685989375,
+SNULL, 140453683879935, 140453685989375,
+STORE, 140453683736576, 140453683879935,
+STORE, 140453683879936, 140453685989375,
+ERASE, 140453683879936, 140453685989375,
+STORE, 140453685977088, 140453685985279,
+STORE, 140453685985280, 140453685989375,
+STORE, 140724216832000, 140724216836095,
+STORE, 140724216819712, 140724216831999,
+STORE, 140453685948416, 140453685977087,
+STORE, 140453685940224, 140453685948415,
+STORE, 140453681623040, 140453683736575,
+SNULL, 140453681623040, 140453681635327,
+STORE, 140453681635328, 140453683736575,
+STORE, 140453681623040, 140453681635327,
+SNULL, 140453683728383, 140453683736575,
+STORE, 140453681635328, 140453683728383,
+STORE, 140453683728384, 140453683736575,
+ERASE, 140453683728384, 140453683736575,
+STORE, 140453683728384, 140453683736575,
+STORE, 140453677826048, 140453681623039,
+SNULL, 140453677826048, 140453679484927,
+STORE, 140453679484928, 140453681623039,
+STORE, 140453677826048, 140453679484927,
+SNULL, 140453681582079, 140453681623039,
+STORE, 140453679484928, 140453681582079,
+STORE, 140453681582080, 140453681623039,
+SNULL, 140453681582080, 140453681606655,
+STORE, 140453681606656, 140453681623039,
+STORE, 140453681582080, 140453681606655,
+ERASE, 140453681582080, 140453681606655,
+STORE, 140453681582080, 140453681606655,
+ERASE, 140453681606656, 140453681623039,
+STORE, 140453681606656, 140453681623039,
+STORE, 140453685932032, 140453685948415,
+SNULL, 140453681598463, 140453681606655,
+STORE, 140453681582080, 140453681598463,
+STORE, 140453681598464, 140453681606655,
+SNULL, 140453683732479, 140453683736575,
+STORE, 140453683728384, 140453683732479,
+STORE, 140453683732480, 140453683736575,
+SNULL, 94870932942847, 94870932951039,
+STORE, 94870932938752, 94870932942847,
+STORE, 94870932942848, 94870932951039,
+SNULL, 140453685981183, 140453685985279,
+STORE, 140453685977088, 140453685981183,
+STORE, 140453685981184, 140453685985279,
+ERASE, 140453685948416, 140453685977087,
+STORE, 94870940565504, 94870940700671,
+STORE, 140453684248576, 140453685932031,
+STORE, 94870940565504, 94870940835839,
+STORE, 94870940565504, 94870940971007,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731275661312, 140737488351231,
+SNULL, 140731275669503, 140737488351231,
+STORE, 140731275661312, 140731275669503,
+STORE, 140731275530240, 140731275669503,
+STORE, 94642788548608, 94642790883327,
+SNULL, 94642788761599, 94642790883327,
+STORE, 94642788548608, 94642788761599,
+STORE, 94642788761600, 94642790883327,
+ERASE, 94642788761600, 94642790883327,
+STORE, 94642790858752, 94642790871039,
+STORE, 94642790871040, 94642790883327,
+STORE, 140228458749952, 140228461002751,
+SNULL, 140228458893311, 140228461002751,
+STORE, 140228458749952, 140228458893311,
+STORE, 140228458893312, 140228461002751,
+ERASE, 140228458893312, 140228461002751,
+STORE, 140228460990464, 140228460998655,
+STORE, 140228460998656, 140228461002751,
+STORE, 140731276349440, 140731276353535,
+STORE, 140731276337152, 140731276349439,
+STORE, 140228460961792, 140228460990463,
+STORE, 140228460953600, 140228460961791,
+STORE, 140228456636416, 140228458749951,
+SNULL, 140228456636416, 140228456648703,
+STORE, 140228456648704, 140228458749951,
+STORE, 140228456636416, 140228456648703,
+SNULL, 140228458741759, 140228458749951,
+STORE, 140228456648704, 140228458741759,
+STORE, 140228458741760, 140228458749951,
+ERASE, 140228458741760, 140228458749951,
+STORE, 140228458741760, 140228458749951,
+STORE, 140228452839424, 140228456636415,
+SNULL, 140228452839424, 140228454498303,
+STORE, 140228454498304, 140228456636415,
+STORE, 140228452839424, 140228454498303,
+SNULL, 140228456595455, 140228456636415,
+STORE, 140228454498304, 140228456595455,
+STORE, 140228456595456, 140228456636415,
+SNULL, 140228456595456, 140228456620031,
+STORE, 140228456620032, 140228456636415,
+STORE, 140228456595456, 140228456620031,
+ERASE, 140228456595456, 140228456620031,
+STORE, 140228456595456, 140228456620031,
+ERASE, 140228456620032, 140228456636415,
+STORE, 140228456620032, 140228456636415,
+STORE, 140228460945408, 140228460961791,
+SNULL, 140228456611839, 140228456620031,
+STORE, 140228456595456, 140228456611839,
+STORE, 140228456611840, 140228456620031,
+SNULL, 140228458745855, 140228458749951,
+STORE, 140228458741760, 140228458745855,
+STORE, 140228458745856, 140228458749951,
+SNULL, 94642790862847, 94642790871039,
+STORE, 94642790858752, 94642790862847,
+STORE, 94642790862848, 94642790871039,
+SNULL, 140228460994559, 140228460998655,
+STORE, 140228460990464, 140228460994559,
+STORE, 140228460994560, 140228460998655,
+ERASE, 140228460961792, 140228460990463,
+STORE, 94642801549312, 94642801684479,
+STORE, 140228459261952, 140228460945407,
+STORE, 94642801549312, 94642801819647,
+STORE, 94642801549312, 94642801954815,
+STORE, 94604087611392, 94604087824383,
+STORE, 94604089921536, 94604089925631,
+STORE, 94604089925632, 94604089933823,
+STORE, 94604089933824, 94604089946111,
+STORE, 94604105125888, 94604106424319,
+STORE, 140454937694208, 140454939353087,
+STORE, 140454939353088, 140454941450239,
+STORE, 140454941450240, 140454941466623,
+STORE, 140454941466624, 140454941474815,
+STORE, 140454941474816, 140454941491199,
+STORE, 140454941491200, 140454941503487,
+STORE, 140454941503488, 140454943596543,
+STORE, 140454943596544, 140454943600639,
+STORE, 140454943600640, 140454943604735,
+STORE, 140454943604736, 140454943748095,
+STORE, 140454944116736, 140454945800191,
+STORE, 140454945800192, 140454945816575,
+STORE, 140454945845248, 140454945849343,
+STORE, 140454945849344, 140454945853439,
+STORE, 140454945853440, 140454945857535,
+STORE, 140728438214656, 140728438353919,
+STORE, 140728439095296, 140728439107583,
+STORE, 140728439107584, 140728439111679,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140721843453952, 140737488351231,
+SNULL, 140721843462143, 140737488351231,
+STORE, 140721843453952, 140721843462143,
+STORE, 140721843322880, 140721843462143,
+STORE, 94465962455040, 94465964789759,
+SNULL, 94465962668031, 94465964789759,
+STORE, 94465962455040, 94465962668031,
+STORE, 94465962668032, 94465964789759,
+ERASE, 94465962668032, 94465964789759,
+STORE, 94465964765184, 94465964777471,
+STORE, 94465964777472, 94465964789759,
+STORE, 139913488314368, 139913490567167,
+SNULL, 139913488457727, 139913490567167,
+STORE, 139913488314368, 139913488457727,
+STORE, 139913488457728, 139913490567167,
+ERASE, 139913488457728, 139913490567167,
+STORE, 139913490554880, 139913490563071,
+STORE, 139913490563072, 139913490567167,
+STORE, 140721843503104, 140721843507199,
+STORE, 140721843490816, 140721843503103,
+STORE, 139913490526208, 139913490554879,
+STORE, 139913490518016, 139913490526207,
+STORE, 139913486200832, 139913488314367,
+SNULL, 139913486200832, 139913486213119,
+STORE, 139913486213120, 139913488314367,
+STORE, 139913486200832, 139913486213119,
+SNULL, 139913488306175, 139913488314367,
+STORE, 139913486213120, 139913488306175,
+STORE, 139913488306176, 139913488314367,
+ERASE, 139913488306176, 139913488314367,
+STORE, 139913488306176, 139913488314367,
+STORE, 139913482403840, 139913486200831,
+SNULL, 139913482403840, 139913484062719,
+STORE, 139913484062720, 139913486200831,
+STORE, 139913482403840, 139913484062719,
+SNULL, 139913486159871, 139913486200831,
+STORE, 139913484062720, 139913486159871,
+STORE, 139913486159872, 139913486200831,
+SNULL, 139913486159872, 139913486184447,
+STORE, 139913486184448, 139913486200831,
+STORE, 139913486159872, 139913486184447,
+ERASE, 139913486159872, 139913486184447,
+STORE, 139913486159872, 139913486184447,
+ERASE, 139913486184448, 139913486200831,
+STORE, 139913486184448, 139913486200831,
+STORE, 139913490509824, 139913490526207,
+SNULL, 139913486176255, 139913486184447,
+STORE, 139913486159872, 139913486176255,
+STORE, 139913486176256, 139913486184447,
+SNULL, 139913488310271, 139913488314367,
+STORE, 139913488306176, 139913488310271,
+STORE, 139913488310272, 139913488314367,
+SNULL, 94465964769279, 94465964777471,
+STORE, 94465964765184, 94465964769279,
+STORE, 94465964769280, 94465964777471,
+SNULL, 139913490558975, 139913490563071,
+STORE, 139913490554880, 139913490558975,
+STORE, 139913490558976, 139913490563071,
+ERASE, 139913490526208, 139913490554879,
+STORE, 94465970024448, 94465970159615,
+STORE, 139913488826368, 139913490509823,
+STORE, 94465970024448, 94465970294783,
+STORE, 94465970024448, 94465970429951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140720583307264, 140737488351231,
+SNULL, 140720583315455, 140737488351231,
+STORE, 140720583307264, 140720583315455,
+STORE, 140720583176192, 140720583315455,
+STORE, 94212322082816, 94212324417535,
+SNULL, 94212322295807, 94212324417535,
+STORE, 94212322082816, 94212322295807,
+STORE, 94212322295808, 94212324417535,
+ERASE, 94212322295808, 94212324417535,
+STORE, 94212324392960, 94212324405247,
+STORE, 94212324405248, 94212324417535,
+STORE, 139659688538112, 139659690790911,
+SNULL, 139659688681471, 139659690790911,
+STORE, 139659688538112, 139659688681471,
+STORE, 139659688681472, 139659690790911,
+ERASE, 139659688681472, 139659690790911,
+STORE, 139659690778624, 139659690786815,
+STORE, 139659690786816, 139659690790911,
+STORE, 140720584781824, 140720584785919,
+STORE, 140720584769536, 140720584781823,
+STORE, 139659690749952, 139659690778623,
+STORE, 139659690741760, 139659690749951,
+STORE, 139659686424576, 139659688538111,
+SNULL, 139659686424576, 139659686436863,
+STORE, 139659686436864, 139659688538111,
+STORE, 139659686424576, 139659686436863,
+SNULL, 139659688529919, 139659688538111,
+STORE, 139659686436864, 139659688529919,
+STORE, 139659688529920, 139659688538111,
+ERASE, 139659688529920, 139659688538111,
+STORE, 139659688529920, 139659688538111,
+STORE, 139659682627584, 139659686424575,
+SNULL, 139659682627584, 139659684286463,
+STORE, 139659684286464, 139659686424575,
+STORE, 139659682627584, 139659684286463,
+SNULL, 139659686383615, 139659686424575,
+STORE, 139659684286464, 139659686383615,
+STORE, 139659686383616, 139659686424575,
+SNULL, 139659686383616, 139659686408191,
+STORE, 139659686408192, 139659686424575,
+STORE, 139659686383616, 139659686408191,
+ERASE, 139659686383616, 139659686408191,
+STORE, 139659686383616, 139659686408191,
+ERASE, 139659686408192, 139659686424575,
+STORE, 139659686408192, 139659686424575,
+STORE, 139659690733568, 139659690749951,
+SNULL, 139659686399999, 139659686408191,
+STORE, 139659686383616, 139659686399999,
+STORE, 139659686400000, 139659686408191,
+SNULL, 139659688534015, 139659688538111,
+STORE, 139659688529920, 139659688534015,
+STORE, 139659688534016, 139659688538111,
+SNULL, 94212324397055, 94212324405247,
+STORE, 94212324392960, 94212324397055,
+STORE, 94212324397056, 94212324405247,
+SNULL, 139659690782719, 139659690786815,
+STORE, 139659690778624, 139659690782719,
+STORE, 139659690782720, 139659690786815,
+ERASE, 139659690749952, 139659690778623,
+STORE, 94212355014656, 94212355149823,
+STORE, 139659689050112, 139659690733567,
+STORE, 94212355014656, 94212355284991,
+STORE, 94212355014656, 94212355420159,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140727689830400, 140737488351231,
+SNULL, 140727689838591, 140737488351231,
+STORE, 140727689830400, 140727689838591,
+STORE, 140727689699328, 140727689838591,
+STORE, 94572390281216, 94572392615935,
+SNULL, 94572390494207, 94572392615935,
+STORE, 94572390281216, 94572390494207,
+STORE, 94572390494208, 94572392615935,
+ERASE, 94572390494208, 94572392615935,
+STORE, 94572392591360, 94572392603647,
+STORE, 94572392603648, 94572392615935,
+STORE, 140575923769344, 140575926022143,
+SNULL, 140575923912703, 140575926022143,
+STORE, 140575923769344, 140575923912703,
+STORE, 140575923912704, 140575926022143,
+ERASE, 140575923912704, 140575926022143,
+STORE, 140575926009856, 140575926018047,
+STORE, 140575926018048, 140575926022143,
+STORE, 140727689871360, 140727689875455,
+STORE, 140727689859072, 140727689871359,
+STORE, 140575925981184, 140575926009855,
+STORE, 140575925972992, 140575925981183,
+STORE, 140575921655808, 140575923769343,
+SNULL, 140575921655808, 140575921668095,
+STORE, 140575921668096, 140575923769343,
+STORE, 140575921655808, 140575921668095,
+SNULL, 140575923761151, 140575923769343,
+STORE, 140575921668096, 140575923761151,
+STORE, 140575923761152, 140575923769343,
+ERASE, 140575923761152, 140575923769343,
+STORE, 140575923761152, 140575923769343,
+STORE, 140575917858816, 140575921655807,
+SNULL, 140575917858816, 140575919517695,
+STORE, 140575919517696, 140575921655807,
+STORE, 140575917858816, 140575919517695,
+SNULL, 140575921614847, 140575921655807,
+STORE, 140575919517696, 140575921614847,
+STORE, 140575921614848, 140575921655807,
+SNULL, 140575921614848, 140575921639423,
+STORE, 140575921639424, 140575921655807,
+STORE, 140575921614848, 140575921639423,
+ERASE, 140575921614848, 140575921639423,
+STORE, 140575921614848, 140575921639423,
+ERASE, 140575921639424, 140575921655807,
+STORE, 140575921639424, 140575921655807,
+STORE, 140575925964800, 140575925981183,
+SNULL, 140575921631231, 140575921639423,
+STORE, 140575921614848, 140575921631231,
+STORE, 140575921631232, 140575921639423,
+SNULL, 140575923765247, 140575923769343,
+STORE, 140575923761152, 140575923765247,
+STORE, 140575923765248, 140575923769343,
+SNULL, 94572392595455, 94572392603647,
+STORE, 94572392591360, 94572392595455,
+STORE, 94572392595456, 94572392603647,
+SNULL, 140575926013951, 140575926018047,
+STORE, 140575926009856, 140575926013951,
+STORE, 140575926013952, 140575926018047,
+ERASE, 140575925981184, 140575926009855,
+STORE, 94572402278400, 94572402413567,
+STORE, 140575924281344, 140575925964799,
+STORE, 94572402278400, 94572402548735,
+STORE, 94572402278400, 94572402683903,
+STORE, 94572402278400, 94572402851839,
+SNULL, 94572402827263, 94572402851839,
+STORE, 94572402278400, 94572402827263,
+STORE, 94572402827264, 94572402851839,
+ERASE, 94572402827264, 94572402851839,
+STORE, 94572402278400, 94572402966527,
+STORE, 94572402278400, 94572403109887,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140725520506880, 140737488351231,
+SNULL, 140725520515071, 140737488351231,
+STORE, 140725520506880, 140725520515071,
+STORE, 140725520375808, 140725520515071,
+STORE, 93829948788736, 93829951012863,
+SNULL, 93829948899327, 93829951012863,
+STORE, 93829948788736, 93829948899327,
+STORE, 93829948899328, 93829951012863,
+ERASE, 93829948899328, 93829951012863,
+STORE, 93829950992384, 93829951004671,
+STORE, 93829951004672, 93829951012863,
+STORE, 140133696794624, 140133699047423,
+SNULL, 140133696937983, 140133699047423,
+STORE, 140133696794624, 140133696937983,
+STORE, 140133696937984, 140133699047423,
+ERASE, 140133696937984, 140133699047423,
+STORE, 140133699035136, 140133699043327,
+STORE, 140133699043328, 140133699047423,
+STORE, 140725520875520, 140725520879615,
+STORE, 140725520863232, 140725520875519,
+STORE, 140133699006464, 140133699035135,
+STORE, 140133698998272, 140133699006463,
+STORE, 140133692997632, 140133696794623,
+SNULL, 140133692997632, 140133694656511,
+STORE, 140133694656512, 140133696794623,
+STORE, 140133692997632, 140133694656511,
+SNULL, 140133696753663, 140133696794623,
+STORE, 140133694656512, 140133696753663,
+STORE, 140133696753664, 140133696794623,
+SNULL, 140133696753664, 140133696778239,
+STORE, 140133696778240, 140133696794623,
+STORE, 140133696753664, 140133696778239,
+ERASE, 140133696753664, 140133696778239,
+STORE, 140133696753664, 140133696778239,
+ERASE, 140133696778240, 140133696794623,
+STORE, 140133696778240, 140133696794623,
+SNULL, 140133696770047, 140133696778239,
+STORE, 140133696753664, 140133696770047,
+STORE, 140133696770048, 140133696778239,
+SNULL, 93829951000575, 93829951004671,
+STORE, 93829950992384, 93829951000575,
+STORE, 93829951000576, 93829951004671,
+SNULL, 140133699039231, 140133699043327,
+STORE, 140133699035136, 140133699039231,
+STORE, 140133699039232, 140133699043327,
+ERASE, 140133699006464, 140133699035135,
+STORE, 93829978693632, 93829978828799,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140736118022144, 140737488351231,
+SNULL, 140736118030335, 140737488351231,
+STORE, 140736118022144, 140736118030335,
+STORE, 140736117891072, 140736118030335,
+STORE, 94467663982592, 94467666206719,
+SNULL, 94467664093183, 94467666206719,
+STORE, 94467663982592, 94467664093183,
+STORE, 94467664093184, 94467666206719,
+ERASE, 94467664093184, 94467666206719,
+STORE, 94467666186240, 94467666198527,
+STORE, 94467666198528, 94467666206719,
+STORE, 140525377327104, 140525379579903,
+SNULL, 140525377470463, 140525379579903,
+STORE, 140525377327104, 140525377470463,
+STORE, 140525377470464, 140525379579903,
+ERASE, 140525377470464, 140525379579903,
+STORE, 140525379567616, 140525379575807,
+STORE, 140525379575808, 140525379579903,
+STORE, 140736118771712, 140736118775807,
+STORE, 140736118759424, 140736118771711,
+STORE, 140525379538944, 140525379567615,
+STORE, 140525379530752, 140525379538943,
+STORE, 140525373530112, 140525377327103,
+SNULL, 140525373530112, 140525375188991,
+STORE, 140525375188992, 140525377327103,
+STORE, 140525373530112, 140525375188991,
+SNULL, 140525377286143, 140525377327103,
+STORE, 140525375188992, 140525377286143,
+STORE, 140525377286144, 140525377327103,
+SNULL, 140525377286144, 140525377310719,
+STORE, 140525377310720, 140525377327103,
+STORE, 140525377286144, 140525377310719,
+ERASE, 140525377286144, 140525377310719,
+STORE, 140525377286144, 140525377310719,
+ERASE, 140525377310720, 140525377327103,
+STORE, 140525377310720, 140525377327103,
+SNULL, 140525377302527, 140525377310719,
+STORE, 140525377286144, 140525377302527,
+STORE, 140525377302528, 140525377310719,
+SNULL, 94467666194431, 94467666198527,
+STORE, 94467666186240, 94467666194431,
+STORE, 94467666194432, 94467666198527,
+SNULL, 140525379571711, 140525379575807,
+STORE, 140525379567616, 140525379571711,
+STORE, 140525379571712, 140525379575807,
+ERASE, 140525379538944, 140525379567615,
+STORE, 94467693379584, 94467693514751,
+STORE, 94200172744704, 94200172957695,
+STORE, 94200175054848, 94200175058943,
+STORE, 94200175058944, 94200175067135,
+STORE, 94200175067136, 94200175079423,
+STORE, 94200196673536, 94200198905855,
+STORE, 140053867720704, 140053869379583,
+STORE, 140053869379584, 140053871476735,
+STORE, 140053871476736, 140053871493119,
+STORE, 140053871493120, 140053871501311,
+STORE, 140053871501312, 140053871517695,
+STORE, 140053871517696, 140053871529983,
+STORE, 140053871529984, 140053873623039,
+STORE, 140053873623040, 140053873627135,
+STORE, 140053873627136, 140053873631231,
+STORE, 140053873631232, 140053873774591,
+STORE, 140053874143232, 140053875826687,
+STORE, 140053875826688, 140053875843071,
+STORE, 140053875871744, 140053875875839,
+STORE, 140053875875840, 140053875879935,
+STORE, 140053875879936, 140053875884031,
+STORE, 140728538484736, 140728538623999,
+STORE, 140728538652672, 140728538664959,
+STORE, 140728538664960, 140728538669055,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140732307775488, 140737488351231,
+SNULL, 140732307783679, 140737488351231,
+STORE, 140732307775488, 140732307783679,
+STORE, 140732307644416, 140732307783679,
+STORE, 93831417630720, 93831419965439,
+SNULL, 93831417843711, 93831419965439,
+STORE, 93831417630720, 93831417843711,
+STORE, 93831417843712, 93831419965439,
+ERASE, 93831417843712, 93831419965439,
+STORE, 93831419940864, 93831419953151,
+STORE, 93831419953152, 93831419965439,
+STORE, 140241062088704, 140241064341503,
+SNULL, 140241062232063, 140241064341503,
+STORE, 140241062088704, 140241062232063,
+STORE, 140241062232064, 140241064341503,
+ERASE, 140241062232064, 140241064341503,
+STORE, 140241064329216, 140241064337407,
+STORE, 140241064337408, 140241064341503,
+STORE, 140732308140032, 140732308144127,
+STORE, 140732308127744, 140732308140031,
+STORE, 140241064300544, 140241064329215,
+STORE, 140241064292352, 140241064300543,
+STORE, 140241059975168, 140241062088703,
+SNULL, 140241059975168, 140241059987455,
+STORE, 140241059987456, 140241062088703,
+STORE, 140241059975168, 140241059987455,
+SNULL, 140241062080511, 140241062088703,
+STORE, 140241059987456, 140241062080511,
+STORE, 140241062080512, 140241062088703,
+ERASE, 140241062080512, 140241062088703,
+STORE, 140241062080512, 140241062088703,
+STORE, 140241056178176, 140241059975167,
+SNULL, 140241056178176, 140241057837055,
+STORE, 140241057837056, 140241059975167,
+STORE, 140241056178176, 140241057837055,
+SNULL, 140241059934207, 140241059975167,
+STORE, 140241057837056, 140241059934207,
+STORE, 140241059934208, 140241059975167,
+SNULL, 140241059934208, 140241059958783,
+STORE, 140241059958784, 140241059975167,
+STORE, 140241059934208, 140241059958783,
+ERASE, 140241059934208, 140241059958783,
+STORE, 140241059934208, 140241059958783,
+ERASE, 140241059958784, 140241059975167,
+STORE, 140241059958784, 140241059975167,
+STORE, 140241064284160, 140241064300543,
+SNULL, 140241059950591, 140241059958783,
+STORE, 140241059934208, 140241059950591,
+STORE, 140241059950592, 140241059958783,
+SNULL, 140241062084607, 140241062088703,
+STORE, 140241062080512, 140241062084607,
+STORE, 140241062084608, 140241062088703,
+SNULL, 93831419944959, 93831419953151,
+STORE, 93831419940864, 93831419944959,
+STORE, 93831419944960, 93831419953151,
+SNULL, 140241064333311, 140241064337407,
+STORE, 140241064329216, 140241064333311,
+STORE, 140241064333312, 140241064337407,
+ERASE, 140241064300544, 140241064329215,
+STORE, 93831435284480, 93831435419647,
+STORE, 140241062600704, 140241064284159,
+STORE, 93831435284480, 93831435554815,
+STORE, 93831435284480, 93831435689983,
+STORE, 93831435284480, 93831435862015,
+SNULL, 93831435837439, 93831435862015,
+STORE, 93831435284480, 93831435837439,
+STORE, 93831435837440, 93831435862015,
+ERASE, 93831435837440, 93831435862015,
+STORE, 93831435284480, 93831435972607,
+STORE, 93831435284480, 93831436107775,
+SNULL, 93831436091391, 93831436107775,
+STORE, 93831435284480, 93831436091391,
+STORE, 93831436091392, 93831436107775,
+ERASE, 93831436091392, 93831436107775,
+STORE, 93831435284480, 93831436226559,
+STORE, 93831435284480, 93831436361727,
+STORE, 93831435284480, 93831436505087,
+STORE, 93831435284480, 93831436652543,
+STORE, 93831435284480, 93831436787711,
+STORE, 93831435284480, 93831436926975,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140728546775040, 140737488351231,
+SNULL, 140728546783231, 140737488351231,
+STORE, 140728546775040, 140728546783231,
+STORE, 140728546643968, 140728546783231,
+STORE, 94456178786304, 94456181010431,
+SNULL, 94456178896895, 94456181010431,
+STORE, 94456178786304, 94456178896895,
+STORE, 94456178896896, 94456181010431,
+ERASE, 94456178896896, 94456181010431,
+STORE, 94456180989952, 94456181002239,
+STORE, 94456181002240, 94456181010431,
+STORE, 140221893091328, 140221895344127,
+SNULL, 140221893234687, 140221895344127,
+STORE, 140221893091328, 140221893234687,
+STORE, 140221893234688, 140221895344127,
+ERASE, 140221893234688, 140221895344127,
+STORE, 140221895331840, 140221895340031,
+STORE, 140221895340032, 140221895344127,
+STORE, 140728547803136, 140728547807231,
+STORE, 140728547790848, 140728547803135,
+STORE, 140221895303168, 140221895331839,
+STORE, 140221895294976, 140221895303167,
+STORE, 140221889294336, 140221893091327,
+SNULL, 140221889294336, 140221890953215,
+STORE, 140221890953216, 140221893091327,
+STORE, 140221889294336, 140221890953215,
+SNULL, 140221893050367, 140221893091327,
+STORE, 140221890953216, 140221893050367,
+STORE, 140221893050368, 140221893091327,
+SNULL, 140221893050368, 140221893074943,
+STORE, 140221893074944, 140221893091327,
+STORE, 140221893050368, 140221893074943,
+ERASE, 140221893050368, 140221893074943,
+STORE, 140221893050368, 140221893074943,
+ERASE, 140221893074944, 140221893091327,
+STORE, 140221893074944, 140221893091327,
+SNULL, 140221893066751, 140221893074943,
+STORE, 140221893050368, 140221893066751,
+STORE, 140221893066752, 140221893074943,
+SNULL, 94456180998143, 94456181002239,
+STORE, 94456180989952, 94456180998143,
+STORE, 94456180998144, 94456181002239,
+SNULL, 140221895335935, 140221895340031,
+STORE, 140221895331840, 140221895335935,
+STORE, 140221895335936, 140221895340031,
+ERASE, 140221895303168, 140221895331839,
+STORE, 94456203730944, 94456203866111,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140734438637568, 140737488351231,
+SNULL, 140734438645759, 140737488351231,
+STORE, 140734438637568, 140734438645759,
+STORE, 140734438506496, 140734438645759,
+STORE, 94652233351168, 94652235575295,
+SNULL, 94652233461759, 94652235575295,
+STORE, 94652233351168, 94652233461759,
+STORE, 94652233461760, 94652235575295,
+ERASE, 94652233461760, 94652235575295,
+STORE, 94652235554816, 94652235567103,
+STORE, 94652235567104, 94652235575295,
+STORE, 140536493195264, 140536495448063,
+SNULL, 140536493338623, 140536495448063,
+STORE, 140536493195264, 140536493338623,
+STORE, 140536493338624, 140536495448063,
+ERASE, 140536493338624, 140536495448063,
+STORE, 140536495435776, 140536495443967,
+STORE, 140536495443968, 140536495448063,
+STORE, 140734439002112, 140734439006207,
+STORE, 140734438989824, 140734439002111,
+STORE, 140536495407104, 140536495435775,
+STORE, 140536495398912, 140536495407103,
+STORE, 140536489398272, 140536493195263,
+SNULL, 140536489398272, 140536491057151,
+STORE, 140536491057152, 140536493195263,
+STORE, 140536489398272, 140536491057151,
+SNULL, 140536493154303, 140536493195263,
+STORE, 140536491057152, 140536493154303,
+STORE, 140536493154304, 140536493195263,
+SNULL, 140536493154304, 140536493178879,
+STORE, 140536493178880, 140536493195263,
+STORE, 140536493154304, 140536493178879,
+ERASE, 140536493154304, 140536493178879,
+STORE, 140536493154304, 140536493178879,
+ERASE, 140536493178880, 140536493195263,
+STORE, 140536493178880, 140536493195263,
+SNULL, 140536493170687, 140536493178879,
+STORE, 140536493154304, 140536493170687,
+STORE, 140536493170688, 140536493178879,
+SNULL, 94652235563007, 94652235567103,
+STORE, 94652235554816, 94652235563007,
+STORE, 94652235563008, 94652235567103,
+SNULL, 140536495439871, 140536495443967,
+STORE, 140536495435776, 140536495439871,
+STORE, 140536495439872, 140536495443967,
+ERASE, 140536495407104, 140536495435775,
+STORE, 94652265619456, 94652265754623,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140721814200320, 140737488351231,
+SNULL, 140721814208511, 140737488351231,
+STORE, 140721814200320, 140721814208511,
+STORE, 140721814069248, 140721814208511,
+STORE, 94062800691200, 94062802915327,
+SNULL, 94062800801791, 94062802915327,
+STORE, 94062800691200, 94062800801791,
+STORE, 94062800801792, 94062802915327,
+ERASE, 94062800801792, 94062802915327,
+STORE, 94062802894848, 94062802907135,
+STORE, 94062802907136, 94062802915327,
+STORE, 139717739700224, 139717741953023,
+SNULL, 139717739843583, 139717741953023,
+STORE, 139717739700224, 139717739843583,
+STORE, 139717739843584, 139717741953023,
+ERASE, 139717739843584, 139717741953023,
+STORE, 139717741940736, 139717741948927,
+STORE, 139717741948928, 139717741953023,
+STORE, 140721814224896, 140721814228991,
+STORE, 140721814212608, 140721814224895,
+STORE, 139717741912064, 139717741940735,
+STORE, 139717741903872, 139717741912063,
+STORE, 139717735903232, 139717739700223,
+SNULL, 139717735903232, 139717737562111,
+STORE, 139717737562112, 139717739700223,
+STORE, 139717735903232, 139717737562111,
+SNULL, 139717739659263, 139717739700223,
+STORE, 139717737562112, 139717739659263,
+STORE, 139717739659264, 139717739700223,
+SNULL, 139717739659264, 139717739683839,
+STORE, 139717739683840, 139717739700223,
+STORE, 139717739659264, 139717739683839,
+ERASE, 139717739659264, 139717739683839,
+STORE, 139717739659264, 139717739683839,
+ERASE, 139717739683840, 139717739700223,
+STORE, 139717739683840, 139717739700223,
+SNULL, 139717739675647, 139717739683839,
+STORE, 139717739659264, 139717739675647,
+STORE, 139717739675648, 139717739683839,
+SNULL, 94062802903039, 94062802907135,
+STORE, 94062802894848, 94062802903039,
+STORE, 94062802903040, 94062802907135,
+SNULL, 139717741944831, 139717741948927,
+STORE, 139717741940736, 139717741944831,
+STORE, 139717741944832, 139717741948927,
+ERASE, 139717741912064, 139717741940735,
+STORE, 94062814060544, 94062814195711,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723945754624, 140737488351231,
+SNULL, 140723945762815, 140737488351231,
+STORE, 140723945754624, 140723945762815,
+STORE, 140723945623552, 140723945762815,
+STORE, 94886119305216, 94886121639935,
+SNULL, 94886119518207, 94886121639935,
+STORE, 94886119305216, 94886119518207,
+STORE, 94886119518208, 94886121639935,
+ERASE, 94886119518208, 94886121639935,
+STORE, 94886121615360, 94886121627647,
+STORE, 94886121627648, 94886121639935,
+STORE, 140152532131840, 140152534384639,
+SNULL, 140152532275199, 140152534384639,
+STORE, 140152532131840, 140152532275199,
+STORE, 140152532275200, 140152534384639,
+ERASE, 140152532275200, 140152534384639,
+STORE, 140152534372352, 140152534380543,
+STORE, 140152534380544, 140152534384639,
+STORE, 140723946213376, 140723946217471,
+STORE, 140723946201088, 140723946213375,
+STORE, 140152534343680, 140152534372351,
+STORE, 140152534335488, 140152534343679,
+STORE, 140152530018304, 140152532131839,
+SNULL, 140152530018304, 140152530030591,
+STORE, 140152530030592, 140152532131839,
+STORE, 140152530018304, 140152530030591,
+SNULL, 140152532123647, 140152532131839,
+STORE, 140152530030592, 140152532123647,
+STORE, 140152532123648, 140152532131839,
+ERASE, 140152532123648, 140152532131839,
+STORE, 140152532123648, 140152532131839,
+STORE, 140152526221312, 140152530018303,
+SNULL, 140152526221312, 140152527880191,
+STORE, 140152527880192, 140152530018303,
+STORE, 140152526221312, 140152527880191,
+SNULL, 140152529977343, 140152530018303,
+STORE, 140152527880192, 140152529977343,
+STORE, 140152529977344, 140152530018303,
+SNULL, 140152529977344, 140152530001919,
+STORE, 140152530001920, 140152530018303,
+STORE, 140152529977344, 140152530001919,
+ERASE, 140152529977344, 140152530001919,
+STORE, 140152529977344, 140152530001919,
+ERASE, 140152530001920, 140152530018303,
+STORE, 140152530001920, 140152530018303,
+STORE, 140152534327296, 140152534343679,
+SNULL, 140152529993727, 140152530001919,
+STORE, 140152529977344, 140152529993727,
+STORE, 140152529993728, 140152530001919,
+SNULL, 140152532127743, 140152532131839,
+STORE, 140152532123648, 140152532127743,
+STORE, 140152532127744, 140152532131839,
+SNULL, 94886121619455, 94886121627647,
+STORE, 94886121615360, 94886121619455,
+STORE, 94886121619456, 94886121627647,
+SNULL, 140152534376447, 140152534380543,
+STORE, 140152534372352, 140152534376447,
+STORE, 140152534376448, 140152534380543,
+ERASE, 140152534343680, 140152534372351,
+STORE, 94886129770496, 94886129905663,
+STORE, 140152532643840, 140152534327295,
+STORE, 94886129770496, 94886130040831,
+STORE, 94886129770496, 94886130175999,
+STORE, 94886129770496, 94886130348031,
+SNULL, 94886130323455, 94886130348031,
+STORE, 94886129770496, 94886130323455,
+STORE, 94886130323456, 94886130348031,
+ERASE, 94886130323456, 94886130348031,
+STORE, 94886129770496, 94886130458623,
+STORE, 94886129770496, 94886130606079,
+SNULL, 94886130573311, 94886130606079,
+STORE, 94886129770496, 94886130573311,
+STORE, 94886130573312, 94886130606079,
+ERASE, 94886130573312, 94886130606079,
+STORE, 94886129770496, 94886130724863,
+STORE, 94886129770496, 94886130876415,
+STORE, 94886129770496, 94886131023871,
+STORE, 94886129770496, 94886131175423,
+STORE, 94886129770496, 94886131318783,
+STORE, 94886129770496, 94886131453951,
+SNULL, 94886131449855, 94886131453951,
+STORE, 94886129770496, 94886131449855,
+STORE, 94886131449856, 94886131453951,
+ERASE, 94886131449856, 94886131453951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735450779648, 140737488351231,
+SNULL, 140735450787839, 140737488351231,
+STORE, 140735450779648, 140735450787839,
+STORE, 140735450648576, 140735450787839,
+STORE, 93947794079744, 93947796414463,
+SNULL, 93947794292735, 93947796414463,
+STORE, 93947794079744, 93947794292735,
+STORE, 93947794292736, 93947796414463,
+ERASE, 93947794292736, 93947796414463,
+STORE, 93947796389888, 93947796402175,
+STORE, 93947796402176, 93947796414463,
+STORE, 139841993433088, 139841995685887,
+SNULL, 139841993576447, 139841995685887,
+STORE, 139841993433088, 139841993576447,
+STORE, 139841993576448, 139841995685887,
+ERASE, 139841993576448, 139841995685887,
+STORE, 139841995673600, 139841995681791,
+STORE, 139841995681792, 139841995685887,
+STORE, 140735451308032, 140735451312127,
+STORE, 140735451295744, 140735451308031,
+STORE, 139841995644928, 139841995673599,
+STORE, 139841995636736, 139841995644927,
+STORE, 139841991319552, 139841993433087,
+SNULL, 139841991319552, 139841991331839,
+STORE, 139841991331840, 139841993433087,
+STORE, 139841991319552, 139841991331839,
+SNULL, 139841993424895, 139841993433087,
+STORE, 139841991331840, 139841993424895,
+STORE, 139841993424896, 139841993433087,
+ERASE, 139841993424896, 139841993433087,
+STORE, 139841993424896, 139841993433087,
+STORE, 139841987522560, 139841991319551,
+SNULL, 139841987522560, 139841989181439,
+STORE, 139841989181440, 139841991319551,
+STORE, 139841987522560, 139841989181439,
+SNULL, 139841991278591, 139841991319551,
+STORE, 139841989181440, 139841991278591,
+STORE, 139841991278592, 139841991319551,
+SNULL, 139841991278592, 139841991303167,
+STORE, 139841991303168, 139841991319551,
+STORE, 139841991278592, 139841991303167,
+ERASE, 139841991278592, 139841991303167,
+STORE, 139841991278592, 139841991303167,
+ERASE, 139841991303168, 139841991319551,
+STORE, 139841991303168, 139841991319551,
+STORE, 139841995628544, 139841995644927,
+SNULL, 139841991294975, 139841991303167,
+STORE, 139841991278592, 139841991294975,
+STORE, 139841991294976, 139841991303167,
+SNULL, 139841993428991, 139841993433087,
+STORE, 139841993424896, 139841993428991,
+STORE, 139841993428992, 139841993433087,
+SNULL, 93947796393983, 93947796402175,
+STORE, 93947796389888, 93947796393983,
+STORE, 93947796393984, 93947796402175,
+SNULL, 139841995677695, 139841995681791,
+STORE, 139841995673600, 139841995677695,
+STORE, 139841995677696, 139841995681791,
+ERASE, 139841995644928, 139841995673599,
+STORE, 93947829739520, 93947829874687,
+STORE, 139841993945088, 139841995628543,
+STORE, 93947829739520, 93947830009855,
+STORE, 93947829739520, 93947830145023,
+STORE, 94659351814144, 94659352027135,
+STORE, 94659354124288, 94659354128383,
+STORE, 94659354128384, 94659354136575,
+STORE, 94659354136576, 94659354148863,
+STORE, 94659383476224, 94659385057279,
+STORE, 139959054557184, 139959056216063,
+STORE, 139959056216064, 139959058313215,
+STORE, 139959058313216, 139959058329599,
+STORE, 139959058329600, 139959058337791,
+STORE, 139959058337792, 139959058354175,
+STORE, 139959058354176, 139959058366463,
+STORE, 139959058366464, 139959060459519,
+STORE, 139959060459520, 139959060463615,
+STORE, 139959060463616, 139959060467711,
+STORE, 139959060467712, 139959060611071,
+STORE, 139959060979712, 139959062663167,
+STORE, 139959062663168, 139959062679551,
+STORE, 139959062708224, 139959062712319,
+STORE, 139959062712320, 139959062716415,
+STORE, 139959062716416, 139959062720511,
+STORE, 140735532539904, 140735532679167,
+STORE, 140735532830720, 140735532843007,
+STORE, 140735532843008, 140735532847103,
+STORE, 93894361829376, 93894362042367,
+STORE, 93894364139520, 93894364143615,
+STORE, 93894364143616, 93894364151807,
+STORE, 93894364151808, 93894364164095,
+STORE, 93894396944384, 93894397624319,
+STORE, 140075612573696, 140075614232575,
+STORE, 140075614232576, 140075616329727,
+STORE, 140075616329728, 140075616346111,
+STORE, 140075616346112, 140075616354303,
+STORE, 140075616354304, 140075616370687,
+STORE, 140075616370688, 140075616382975,
+STORE, 140075616382976, 140075618476031,
+STORE, 140075618476032, 140075618480127,
+STORE, 140075618480128, 140075618484223,
+STORE, 140075618484224, 140075618627583,
+STORE, 140075618996224, 140075620679679,
+STORE, 140075620679680, 140075620696063,
+STORE, 140075620724736, 140075620728831,
+STORE, 140075620728832, 140075620732927,
+STORE, 140075620732928, 140075620737023,
+STORE, 140720830312448, 140720830451711,
+STORE, 140720830631936, 140720830644223,
+STORE, 140720830644224, 140720830648319,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735116226560, 140737488351231,
+SNULL, 140735116234751, 140737488351231,
+STORE, 140735116226560, 140735116234751,
+STORE, 140735116095488, 140735116234751,
+STORE, 94873398054912, 94873400279039,
+SNULL, 94873398165503, 94873400279039,
+STORE, 94873398054912, 94873398165503,
+STORE, 94873398165504, 94873400279039,
+ERASE, 94873398165504, 94873400279039,
+STORE, 94873400258560, 94873400270847,
+STORE, 94873400270848, 94873400279039,
+STORE, 140303828606976, 140303830859775,
+SNULL, 140303828750335, 140303830859775,
+STORE, 140303828606976, 140303828750335,
+STORE, 140303828750336, 140303830859775,
+ERASE, 140303828750336, 140303830859775,
+STORE, 140303830847488, 140303830855679,
+STORE, 140303830855680, 140303830859775,
+STORE, 140735116251136, 140735116255231,
+STORE, 140735116238848, 140735116251135,
+STORE, 140303830818816, 140303830847487,
+STORE, 140303830810624, 140303830818815,
+STORE, 140303824809984, 140303828606975,
+SNULL, 140303824809984, 140303826468863,
+STORE, 140303826468864, 140303828606975,
+STORE, 140303824809984, 140303826468863,
+SNULL, 140303828566015, 140303828606975,
+STORE, 140303826468864, 140303828566015,
+STORE, 140303828566016, 140303828606975,
+SNULL, 140303828566016, 140303828590591,
+STORE, 140303828590592, 140303828606975,
+STORE, 140303828566016, 140303828590591,
+ERASE, 140303828566016, 140303828590591,
+STORE, 140303828566016, 140303828590591,
+ERASE, 140303828590592, 140303828606975,
+STORE, 140303828590592, 140303828606975,
+SNULL, 140303828582399, 140303828590591,
+STORE, 140303828566016, 140303828582399,
+STORE, 140303828582400, 140303828590591,
+SNULL, 94873400266751, 94873400270847,
+STORE, 94873400258560, 94873400266751,
+STORE, 94873400266752, 94873400270847,
+SNULL, 140303830851583, 140303830855679,
+STORE, 140303830847488, 140303830851583,
+STORE, 140303830851584, 140303830855679,
+ERASE, 140303830818816, 140303830847487,
+STORE, 94873413713920, 94873413849087,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140732349956096, 140737488351231,
+SNULL, 140732349964287, 140737488351231,
+STORE, 140732349956096, 140732349964287,
+STORE, 140732349825024, 140732349964287,
+STORE, 94009652736000, 94009655070719,
+SNULL, 94009652948991, 94009655070719,
+STORE, 94009652736000, 94009652948991,
+STORE, 94009652948992, 94009655070719,
+ERASE, 94009652948992, 94009655070719,
+STORE, 94009655046144, 94009655058431,
+STORE, 94009655058432, 94009655070719,
+STORE, 140295688531968, 140295690784767,
+SNULL, 140295688675327, 140295690784767,
+STORE, 140295688531968, 140295688675327,
+STORE, 140295688675328, 140295690784767,
+ERASE, 140295688675328, 140295690784767,
+STORE, 140295690772480, 140295690780671,
+STORE, 140295690780672, 140295690784767,
+STORE, 140732350005248, 140732350009343,
+STORE, 140732349992960, 140732350005247,
+STORE, 140295690743808, 140295690772479,
+STORE, 140295690735616, 140295690743807,
+STORE, 140295686418432, 140295688531967,
+SNULL, 140295686418432, 140295686430719,
+STORE, 140295686430720, 140295688531967,
+STORE, 140295686418432, 140295686430719,
+SNULL, 140295688523775, 140295688531967,
+STORE, 140295686430720, 140295688523775,
+STORE, 140295688523776, 140295688531967,
+ERASE, 140295688523776, 140295688531967,
+STORE, 140295688523776, 140295688531967,
+STORE, 140295682621440, 140295686418431,
+SNULL, 140295682621440, 140295684280319,
+STORE, 140295684280320, 140295686418431,
+STORE, 140295682621440, 140295684280319,
+SNULL, 140295686377471, 140295686418431,
+STORE, 140295684280320, 140295686377471,
+STORE, 140295686377472, 140295686418431,
+SNULL, 140295686377472, 140295686402047,
+STORE, 140295686402048, 140295686418431,
+STORE, 140295686377472, 140295686402047,
+ERASE, 140295686377472, 140295686402047,
+STORE, 140295686377472, 140295686402047,
+ERASE, 140295686402048, 140295686418431,
+STORE, 140295686402048, 140295686418431,
+STORE, 140295690727424, 140295690743807,
+SNULL, 140295686393855, 140295686402047,
+STORE, 140295686377472, 140295686393855,
+STORE, 140295686393856, 140295686402047,
+SNULL, 140295688527871, 140295688531967,
+STORE, 140295688523776, 140295688527871,
+STORE, 140295688527872, 140295688531967,
+SNULL, 94009655050239, 94009655058431,
+STORE, 94009655046144, 94009655050239,
+STORE, 94009655050240, 94009655058431,
+SNULL, 140295690776575, 140295690780671,
+STORE, 140295690772480, 140295690776575,
+STORE, 140295690776576, 140295690780671,
+ERASE, 140295690743808, 140295690772479,
+STORE, 94009672114176, 94009672249343,
+STORE, 140295689043968, 140295690727423,
+STORE, 94009672114176, 94009672384511,
+STORE, 94009672114176, 94009672519679,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722376515584, 140737488351231,
+SNULL, 140722376523775, 140737488351231,
+STORE, 140722376515584, 140722376523775,
+STORE, 140722376384512, 140722376523775,
+STORE, 94089815773184, 94089818107903,
+SNULL, 94089815986175, 94089818107903,
+STORE, 94089815773184, 94089815986175,
+STORE, 94089815986176, 94089818107903,
+ERASE, 94089815986176, 94089818107903,
+STORE, 94089818083328, 94089818095615,
+STORE, 94089818095616, 94089818107903,
+STORE, 140265595711488, 140265597964287,
+SNULL, 140265595854847, 140265597964287,
+STORE, 140265595711488, 140265595854847,
+STORE, 140265595854848, 140265597964287,
+ERASE, 140265595854848, 140265597964287,
+STORE, 140265597952000, 140265597960191,
+STORE, 140265597960192, 140265597964287,
+STORE, 140722378297344, 140722378301439,
+STORE, 140722378285056, 140722378297343,
+STORE, 140265597923328, 140265597951999,
+STORE, 140265597915136, 140265597923327,
+STORE, 140265593597952, 140265595711487,
+SNULL, 140265593597952, 140265593610239,
+STORE, 140265593610240, 140265595711487,
+STORE, 140265593597952, 140265593610239,
+SNULL, 140265595703295, 140265595711487,
+STORE, 140265593610240, 140265595703295,
+STORE, 140265595703296, 140265595711487,
+ERASE, 140265595703296, 140265595711487,
+STORE, 140265595703296, 140265595711487,
+STORE, 140265589800960, 140265593597951,
+SNULL, 140265589800960, 140265591459839,
+STORE, 140265591459840, 140265593597951,
+STORE, 140265589800960, 140265591459839,
+SNULL, 140265593556991, 140265593597951,
+STORE, 140265591459840, 140265593556991,
+STORE, 140265593556992, 140265593597951,
+SNULL, 140265593556992, 140265593581567,
+STORE, 140265593581568, 140265593597951,
+STORE, 140265593556992, 140265593581567,
+ERASE, 140265593556992, 140265593581567,
+STORE, 140265593556992, 140265593581567,
+ERASE, 140265593581568, 140265593597951,
+STORE, 140265593581568, 140265593597951,
+STORE, 140265597906944, 140265597923327,
+SNULL, 140265593573375, 140265593581567,
+STORE, 140265593556992, 140265593573375,
+STORE, 140265593573376, 140265593581567,
+SNULL, 140265595707391, 140265595711487,
+STORE, 140265595703296, 140265595707391,
+STORE, 140265595707392, 140265595711487,
+SNULL, 94089818087423, 94089818095615,
+STORE, 94089818083328, 94089818087423,
+STORE, 94089818087424, 94089818095615,
+SNULL, 140265597956095, 140265597960191,
+STORE, 140265597952000, 140265597956095,
+STORE, 140265597956096, 140265597960191,
+ERASE, 140265597923328, 140265597951999,
+STORE, 94089837146112, 94089837281279,
+STORE, 140265596223488, 140265597906943,
+STORE, 94089837146112, 94089837416447,
+STORE, 94089837146112, 94089837551615,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735265218560, 140737488351231,
+SNULL, 140735265226751, 140737488351231,
+STORE, 140735265218560, 140735265226751,
+STORE, 140735265087488, 140735265226751,
+STORE, 94250422370304, 94250424705023,
+SNULL, 94250422583295, 94250424705023,
+STORE, 94250422370304, 94250422583295,
+STORE, 94250422583296, 94250424705023,
+ERASE, 94250422583296, 94250424705023,
+STORE, 94250424680448, 94250424692735,
+STORE, 94250424692736, 94250424705023,
+STORE, 140344442474496, 140344444727295,
+SNULL, 140344442617855, 140344444727295,
+STORE, 140344442474496, 140344442617855,
+STORE, 140344442617856, 140344444727295,
+ERASE, 140344442617856, 140344444727295,
+STORE, 140344444715008, 140344444723199,
+STORE, 140344444723200, 140344444727295,
+STORE, 140735265341440, 140735265345535,
+STORE, 140735265329152, 140735265341439,
+STORE, 140344444686336, 140344444715007,
+STORE, 140344444678144, 140344444686335,
+STORE, 140344440360960, 140344442474495,
+SNULL, 140344440360960, 140344440373247,
+STORE, 140344440373248, 140344442474495,
+STORE, 140344440360960, 140344440373247,
+SNULL, 140344442466303, 140344442474495,
+STORE, 140344440373248, 140344442466303,
+STORE, 140344442466304, 140344442474495,
+ERASE, 140344442466304, 140344442474495,
+STORE, 140344442466304, 140344442474495,
+STORE, 140344436563968, 140344440360959,
+SNULL, 140344436563968, 140344438222847,
+STORE, 140344438222848, 140344440360959,
+STORE, 140344436563968, 140344438222847,
+SNULL, 140344440319999, 140344440360959,
+STORE, 140344438222848, 140344440319999,
+STORE, 140344440320000, 140344440360959,
+SNULL, 140344440320000, 140344440344575,
+STORE, 140344440344576, 140344440360959,
+STORE, 140344440320000, 140344440344575,
+ERASE, 140344440320000, 140344440344575,
+STORE, 140344440320000, 140344440344575,
+ERASE, 140344440344576, 140344440360959,
+STORE, 140344440344576, 140344440360959,
+STORE, 140344444669952, 140344444686335,
+SNULL, 140344440336383, 140344440344575,
+STORE, 140344440320000, 140344440336383,
+STORE, 140344440336384, 140344440344575,
+SNULL, 140344442470399, 140344442474495,
+STORE, 140344442466304, 140344442470399,
+STORE, 140344442470400, 140344442474495,
+SNULL, 94250424684543, 94250424692735,
+STORE, 94250424680448, 94250424684543,
+STORE, 94250424684544, 94250424692735,
+SNULL, 140344444719103, 140344444723199,
+STORE, 140344444715008, 140344444719103,
+STORE, 140344444719104, 140344444723199,
+ERASE, 140344444686336, 140344444715007,
+STORE, 94250445512704, 94250445647871,
+STORE, 140344442986496, 140344444669951,
+STORE, 94250445512704, 94250445783039,
+STORE, 94250445512704, 94250445918207,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140725762719744, 140737488351231,
+SNULL, 140725762727935, 140737488351231,
+STORE, 140725762719744, 140725762727935,
+STORE, 140725762588672, 140725762727935,
+STORE, 94819009097728, 94819011432447,
+SNULL, 94819009310719, 94819011432447,
+STORE, 94819009097728, 94819009310719,
+STORE, 94819009310720, 94819011432447,
+ERASE, 94819009310720, 94819011432447,
+STORE, 94819011407872, 94819011420159,
+STORE, 94819011420160, 94819011432447,
+STORE, 139987985596416, 139987987849215,
+SNULL, 139987985739775, 139987987849215,
+STORE, 139987985596416, 139987985739775,
+STORE, 139987985739776, 139987987849215,
+ERASE, 139987985739776, 139987987849215,
+STORE, 139987987836928, 139987987845119,
+STORE, 139987987845120, 139987987849215,
+STORE, 140725763072000, 140725763076095,
+STORE, 140725763059712, 140725763071999,
+STORE, 139987987808256, 139987987836927,
+STORE, 139987987800064, 139987987808255,
+STORE, 139987983482880, 139987985596415,
+SNULL, 139987983482880, 139987983495167,
+STORE, 139987983495168, 139987985596415,
+STORE, 139987983482880, 139987983495167,
+SNULL, 139987985588223, 139987985596415,
+STORE, 139987983495168, 139987985588223,
+STORE, 139987985588224, 139987985596415,
+ERASE, 139987985588224, 139987985596415,
+STORE, 139987985588224, 139987985596415,
+STORE, 139987979685888, 139987983482879,
+SNULL, 139987979685888, 139987981344767,
+STORE, 139987981344768, 139987983482879,
+STORE, 139987979685888, 139987981344767,
+SNULL, 139987983441919, 139987983482879,
+STORE, 139987981344768, 139987983441919,
+STORE, 139987983441920, 139987983482879,
+SNULL, 139987983441920, 139987983466495,
+STORE, 139987983466496, 139987983482879,
+STORE, 139987983441920, 139987983466495,
+ERASE, 139987983441920, 139987983466495,
+STORE, 139987983441920, 139987983466495,
+ERASE, 139987983466496, 139987983482879,
+STORE, 139987983466496, 139987983482879,
+STORE, 139987987791872, 139987987808255,
+SNULL, 139987983458303, 139987983466495,
+STORE, 139987983441920, 139987983458303,
+STORE, 139987983458304, 139987983466495,
+SNULL, 139987985592319, 139987985596415,
+STORE, 139987985588224, 139987985592319,
+STORE, 139987985592320, 139987985596415,
+SNULL, 94819011411967, 94819011420159,
+STORE, 94819011407872, 94819011411967,
+STORE, 94819011411968, 94819011420159,
+SNULL, 139987987841023, 139987987845119,
+STORE, 139987987836928, 139987987841023,
+STORE, 139987987841024, 139987987845119,
+ERASE, 139987987808256, 139987987836927,
+STORE, 94819028176896, 94819028312063,
+STORE, 139987986108416, 139987987791871,
+STORE, 94819028176896, 94819028447231,
+STORE, 94819028176896, 94819028582399,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722475413504, 140737488351231,
+SNULL, 140722475421695, 140737488351231,
+STORE, 140722475413504, 140722475421695,
+STORE, 140722475282432, 140722475421695,
+STORE, 94620599119872, 94620601343999,
+SNULL, 94620599230463, 94620601343999,
+STORE, 94620599119872, 94620599230463,
+STORE, 94620599230464, 94620601343999,
+ERASE, 94620599230464, 94620601343999,
+STORE, 94620601323520, 94620601335807,
+STORE, 94620601335808, 94620601343999,
+STORE, 139891763060736, 139891765313535,
+SNULL, 139891763204095, 139891765313535,
+STORE, 139891763060736, 139891763204095,
+STORE, 139891763204096, 139891765313535,
+ERASE, 139891763204096, 139891765313535,
+STORE, 139891765301248, 139891765309439,
+STORE, 139891765309440, 139891765313535,
+STORE, 140722475700224, 140722475704319,
+STORE, 140722475687936, 140722475700223,
+STORE, 139891765272576, 139891765301247,
+STORE, 139891765264384, 139891765272575,
+STORE, 139891759263744, 139891763060735,
+SNULL, 139891759263744, 139891760922623,
+STORE, 139891760922624, 139891763060735,
+STORE, 139891759263744, 139891760922623,
+SNULL, 139891763019775, 139891763060735,
+STORE, 139891760922624, 139891763019775,
+STORE, 139891763019776, 139891763060735,
+SNULL, 139891763019776, 139891763044351,
+STORE, 139891763044352, 139891763060735,
+STORE, 139891763019776, 139891763044351,
+ERASE, 139891763019776, 139891763044351,
+STORE, 139891763019776, 139891763044351,
+ERASE, 139891763044352, 139891763060735,
+STORE, 139891763044352, 139891763060735,
+SNULL, 139891763036159, 139891763044351,
+STORE, 139891763019776, 139891763036159,
+STORE, 139891763036160, 139891763044351,
+SNULL, 94620601331711, 94620601335807,
+STORE, 94620601323520, 94620601331711,
+STORE, 94620601331712, 94620601335807,
+SNULL, 139891765305343, 139891765309439,
+STORE, 139891765301248, 139891765305343,
+STORE, 139891765305344, 139891765309439,
+ERASE, 139891765272576, 139891765301247,
+STORE, 94620610027520, 94620610162687,
+STORE, 94031976210432, 94031976423423,
+STORE, 94031978520576, 94031978524671,
+STORE, 94031978524672, 94031978532863,
+STORE, 94031978532864, 94031978545151,
+STORE, 94031990398976, 94031992565759,
+STORE, 140336240640000, 140336242298879,
+STORE, 140336242298880, 140336244396031,
+STORE, 140336244396032, 140336244412415,
+STORE, 140336244412416, 140336244420607,
+STORE, 140336244420608, 140336244436991,
+STORE, 140336244436992, 140336244449279,
+STORE, 140336244449280, 140336246542335,
+STORE, 140336246542336, 140336246546431,
+STORE, 140336246546432, 140336246550527,
+STORE, 140336246550528, 140336246693887,
+STORE, 140336247062528, 140336248745983,
+STORE, 140336248745984, 140336248762367,
+STORE, 140336248791040, 140336248795135,
+STORE, 140336248795136, 140336248799231,
+STORE, 140336248799232, 140336248803327,
+STORE, 140728500064256, 140728500203519,
+STORE, 140728501501952, 140728501514239,
+STORE, 140728501514240, 140728501518335,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140730503987200, 140737488351231,
+SNULL, 140730503995391, 140737488351231,
+STORE, 140730503987200, 140730503995391,
+STORE, 140730503856128, 140730503995391,
+STORE, 93866544205824, 93866546429951,
+SNULL, 93866544316415, 93866546429951,
+STORE, 93866544205824, 93866544316415,
+STORE, 93866544316416, 93866546429951,
+ERASE, 93866544316416, 93866546429951,
+STORE, 93866546409472, 93866546421759,
+STORE, 93866546421760, 93866546429951,
+STORE, 140216311959552, 140216314212351,
+SNULL, 140216312102911, 140216314212351,
+STORE, 140216311959552, 140216312102911,
+STORE, 140216312102912, 140216314212351,
+ERASE, 140216312102912, 140216314212351,
+STORE, 140216314200064, 140216314208255,
+STORE, 140216314208256, 140216314212351,
+STORE, 140730504626176, 140730504630271,
+STORE, 140730504613888, 140730504626175,
+STORE, 140216314171392, 140216314200063,
+STORE, 140216314163200, 140216314171391,
+STORE, 140216308162560, 140216311959551,
+SNULL, 140216308162560, 140216309821439,
+STORE, 140216309821440, 140216311959551,
+STORE, 140216308162560, 140216309821439,
+SNULL, 140216311918591, 140216311959551,
+STORE, 140216309821440, 140216311918591,
+STORE, 140216311918592, 140216311959551,
+SNULL, 140216311918592, 140216311943167,
+STORE, 140216311943168, 140216311959551,
+STORE, 140216311918592, 140216311943167,
+ERASE, 140216311918592, 140216311943167,
+STORE, 140216311918592, 140216311943167,
+ERASE, 140216311943168, 140216311959551,
+STORE, 140216311943168, 140216311959551,
+SNULL, 140216311934975, 140216311943167,
+STORE, 140216311918592, 140216311934975,
+STORE, 140216311934976, 140216311943167,
+SNULL, 93866546417663, 93866546421759,
+STORE, 93866546409472, 93866546417663,
+STORE, 93866546417664, 93866546421759,
+SNULL, 140216314204159, 140216314208255,
+STORE, 140216314200064, 140216314204159,
+STORE, 140216314204160, 140216314208255,
+ERASE, 140216314171392, 140216314200063,
+STORE, 93866550386688, 93866550521855,
+STORE, 94074292674560, 94074292887551,
+STORE, 94074294984704, 94074294988799,
+STORE, 94074294988800, 94074294996991,
+STORE, 94074294996992, 94074295009279,
+STORE, 94074300219392, 94074301378559,
+STORE, 139781563256832, 139781564915711,
+STORE, 139781564915712, 139781567012863,
+STORE, 139781567012864, 139781567029247,
+STORE, 139781567029248, 139781567037439,
+STORE, 139781567037440, 139781567053823,
+STORE, 139781567053824, 139781567066111,
+STORE, 139781567066112, 139781569159167,
+STORE, 139781569159168, 139781569163263,
+STORE, 139781569163264, 139781569167359,
+STORE, 139781569167360, 139781569310719,
+STORE, 139781569679360, 139781571362815,
+STORE, 139781571362816, 139781571379199,
+STORE, 139781571407872, 139781571411967,
+STORE, 139781571411968, 139781571416063,
+STORE, 139781571416064, 139781571420159,
+STORE, 140723688488960, 140723688628223,
+STORE, 140723689005056, 140723689017343,
+STORE, 140723689017344, 140723689021439,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735189745664, 140737488351231,
+SNULL, 140735189753855, 140737488351231,
+STORE, 140735189745664, 140735189753855,
+STORE, 140735189614592, 140735189753855,
+STORE, 94172072177664, 94172074512383,
+SNULL, 94172072390655, 94172074512383,
+STORE, 94172072177664, 94172072390655,
+STORE, 94172072390656, 94172074512383,
+ERASE, 94172072390656, 94172074512383,
+STORE, 94172074487808, 94172074500095,
+STORE, 94172074500096, 94172074512383,
+STORE, 140687827263488, 140687829516287,
+SNULL, 140687827406847, 140687829516287,
+STORE, 140687827263488, 140687827406847,
+STORE, 140687827406848, 140687829516287,
+ERASE, 140687827406848, 140687829516287,
+STORE, 140687829504000, 140687829512191,
+STORE, 140687829512192, 140687829516287,
+STORE, 140735189766144, 140735189770239,
+STORE, 140735189753856, 140735189766143,
+STORE, 140687829475328, 140687829503999,
+STORE, 140687829467136, 140687829475327,
+STORE, 140687825149952, 140687827263487,
+SNULL, 140687825149952, 140687825162239,
+STORE, 140687825162240, 140687827263487,
+STORE, 140687825149952, 140687825162239,
+SNULL, 140687827255295, 140687827263487,
+STORE, 140687825162240, 140687827255295,
+STORE, 140687827255296, 140687827263487,
+ERASE, 140687827255296, 140687827263487,
+STORE, 140687827255296, 140687827263487,
+STORE, 140687821352960, 140687825149951,
+SNULL, 140687821352960, 140687823011839,
+STORE, 140687823011840, 140687825149951,
+STORE, 140687821352960, 140687823011839,
+SNULL, 140687825108991, 140687825149951,
+STORE, 140687823011840, 140687825108991,
+STORE, 140687825108992, 140687825149951,
+SNULL, 140687825108992, 140687825133567,
+STORE, 140687825133568, 140687825149951,
+STORE, 140687825108992, 140687825133567,
+ERASE, 140687825108992, 140687825133567,
+STORE, 140687825108992, 140687825133567,
+ERASE, 140687825133568, 140687825149951,
+STORE, 140687825133568, 140687825149951,
+STORE, 140687829458944, 140687829475327,
+SNULL, 140687825125375, 140687825133567,
+STORE, 140687825108992, 140687825125375,
+STORE, 140687825125376, 140687825133567,
+SNULL, 140687827259391, 140687827263487,
+STORE, 140687827255296, 140687827259391,
+STORE, 140687827259392, 140687827263487,
+SNULL, 94172074491903, 94172074500095,
+STORE, 94172074487808, 94172074491903,
+STORE, 94172074491904, 94172074500095,
+SNULL, 140687829508095, 140687829512191,
+STORE, 140687829504000, 140687829508095,
+STORE, 140687829508096, 140687829512191,
+ERASE, 140687829475328, 140687829503999,
+STORE, 94172092432384, 94172092567551,
+STORE, 140687827775488, 140687829458943,
+STORE, 94172092432384, 94172092702719,
+STORE, 94172092432384, 94172092837887,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140737229504512, 140737488351231,
+SNULL, 140737229512703, 140737488351231,
+STORE, 140737229504512, 140737229512703,
+STORE, 140737229373440, 140737229512703,
+STORE, 94155246866432, 94155249090559,
+SNULL, 94155246977023, 94155249090559,
+STORE, 94155246866432, 94155246977023,
+STORE, 94155246977024, 94155249090559,
+ERASE, 94155246977024, 94155249090559,
+STORE, 94155249070080, 94155249082367,
+STORE, 94155249082368, 94155249090559,
+STORE, 140640993693696, 140640995946495,
+SNULL, 140640993837055, 140640995946495,
+STORE, 140640993693696, 140640993837055,
+STORE, 140640993837056, 140640995946495,
+ERASE, 140640993837056, 140640995946495,
+STORE, 140640995934208, 140640995942399,
+STORE, 140640995942400, 140640995946495,
+STORE, 140737230004224, 140737230008319,
+STORE, 140737229991936, 140737230004223,
+STORE, 140640995905536, 140640995934207,
+STORE, 140640995897344, 140640995905535,
+STORE, 140640989896704, 140640993693695,
+SNULL, 140640989896704, 140640991555583,
+STORE, 140640991555584, 140640993693695,
+STORE, 140640989896704, 140640991555583,
+SNULL, 140640993652735, 140640993693695,
+STORE, 140640991555584, 140640993652735,
+STORE, 140640993652736, 140640993693695,
+SNULL, 140640993652736, 140640993677311,
+STORE, 140640993677312, 140640993693695,
+STORE, 140640993652736, 140640993677311,
+ERASE, 140640993652736, 140640993677311,
+STORE, 140640993652736, 140640993677311,
+ERASE, 140640993677312, 140640993693695,
+STORE, 140640993677312, 140640993693695,
+SNULL, 140640993669119, 140640993677311,
+STORE, 140640993652736, 140640993669119,
+STORE, 140640993669120, 140640993677311,
+SNULL, 94155249078271, 94155249082367,
+STORE, 94155249070080, 94155249078271,
+STORE, 94155249078272, 94155249082367,
+SNULL, 140640995938303, 140640995942399,
+STORE, 140640995934208, 140640995938303,
+STORE, 140640995938304, 140640995942399,
+ERASE, 140640995905536, 140640995934207,
+STORE, 94155281035264, 94155281170431,
+STORE, 94088066453504, 94088066564095,
+STORE, 94088068657152, 94088068665343,
+STORE, 94088068665344, 94088068669439,
+STORE, 94088068669440, 94088068677631,
+STORE, 94088090214400, 94088090349567,
+STORE, 140503024627712, 140503026286591,
+STORE, 140503026286592, 140503028383743,
+STORE, 140503028383744, 140503028400127,
+STORE, 140503028400128, 140503028408319,
+STORE, 140503028408320, 140503028424703,
+STORE, 140503028424704, 140503028568063,
+STORE, 140503030628352, 140503030636543,
+STORE, 140503030665216, 140503030669311,
+STORE, 140503030669312, 140503030673407,
+STORE, 140503030673408, 140503030677503,
+STORE, 140730894725120, 140730894864383,
+STORE, 140730894880768, 140730894893055,
+STORE, 140730894893056, 140730894897151,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140730434342912, 140737488351231,
+SNULL, 140730434351103, 140737488351231,
+STORE, 140730434342912, 140730434351103,
+STORE, 140730434211840, 140730434351103,
+STORE, 4194304, 5128191,
+STORE, 7221248, 7241727,
+STORE, 7241728, 7249919,
+STORE, 140109041938432, 140109044191231,
+SNULL, 140109042081791, 140109044191231,
+STORE, 140109041938432, 140109042081791,
+STORE, 140109042081792, 140109044191231,
+ERASE, 140109042081792, 140109044191231,
+STORE, 140109044178944, 140109044187135,
+STORE, 140109044187136, 140109044191231,
+STORE, 140730434850816, 140730434854911,
+STORE, 140730434838528, 140730434850815,
+STORE, 140109044150272, 140109044178943,
+STORE, 140109044142080, 140109044150271,
+STORE, 140109038776320, 140109041938431,
+SNULL, 140109038776320, 140109039837183,
+STORE, 140109039837184, 140109041938431,
+STORE, 140109038776320, 140109039837183,
+SNULL, 140109041930239, 140109041938431,
+STORE, 140109039837184, 140109041930239,
+STORE, 140109041930240, 140109041938431,
+ERASE, 140109041930240, 140109041938431,
+STORE, 140109041930240, 140109041938431,
+STORE, 140109034979328, 140109038776319,
+SNULL, 140109034979328, 140109036638207,
+STORE, 140109036638208, 140109038776319,
+STORE, 140109034979328, 140109036638207,
+SNULL, 140109038735359, 140109038776319,
+STORE, 140109036638208, 140109038735359,
+STORE, 140109038735360, 140109038776319,
+SNULL, 140109038735360, 140109038759935,
+STORE, 140109038759936, 140109038776319,
+STORE, 140109038735360, 140109038759935,
+ERASE, 140109038735360, 140109038759935,
+STORE, 140109038735360, 140109038759935,
+ERASE, 140109038759936, 140109038776319,
+STORE, 140109038759936, 140109038776319,
+STORE, 140109044129792, 140109044150271,
+SNULL, 140109038751743, 140109038759935,
+STORE, 140109038735360, 140109038751743,
+STORE, 140109038751744, 140109038759935,
+SNULL, 140109041934335, 140109041938431,
+STORE, 140109041930240, 140109041934335,
+STORE, 140109041934336, 140109041938431,
+SNULL, 7233535, 7241727,
+STORE, 7221248, 7233535,
+STORE, 7233536, 7241727,
+SNULL, 140109044183039, 140109044187135,
+STORE, 140109044178944, 140109044183039,
+STORE, 140109044183040, 140109044187135,
+ERASE, 140109044150272, 140109044178943,
+STORE, 20000768, 20135935,
+STORE, 20000768, 20283391,
+STORE, 140109042446336, 140109044129791,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140730853408768, 140737488351231,
+SNULL, 140730853416959, 140737488351231,
+STORE, 140730853408768, 140730853416959,
+STORE, 140730853277696, 140730853416959,
+STORE, 94865902977024, 94865905311743,
+SNULL, 94865903190015, 94865905311743,
+STORE, 94865902977024, 94865903190015,
+STORE, 94865903190016, 94865905311743,
+ERASE, 94865903190016, 94865905311743,
+STORE, 94865905287168, 94865905299455,
+STORE, 94865905299456, 94865905311743,
+STORE, 139768865738752, 139768867991551,
+SNULL, 139768865882111, 139768867991551,
+STORE, 139768865738752, 139768865882111,
+STORE, 139768865882112, 139768867991551,
+ERASE, 139768865882112, 139768867991551,
+STORE, 139768867979264, 139768867987455,
+STORE, 139768867987456, 139768867991551,
+STORE, 140730853957632, 140730853961727,
+STORE, 140730853945344, 140730853957631,
+STORE, 139768867950592, 139768867979263,
+STORE, 139768867942400, 139768867950591,
+STORE, 139768863625216, 139768865738751,
+SNULL, 139768863625216, 139768863637503,
+STORE, 139768863637504, 139768865738751,
+STORE, 139768863625216, 139768863637503,
+SNULL, 139768865730559, 139768865738751,
+STORE, 139768863637504, 139768865730559,
+STORE, 139768865730560, 139768865738751,
+ERASE, 139768865730560, 139768865738751,
+STORE, 139768865730560, 139768865738751,
+STORE, 139768859828224, 139768863625215,
+SNULL, 139768859828224, 139768861487103,
+STORE, 139768861487104, 139768863625215,
+STORE, 139768859828224, 139768861487103,
+SNULL, 139768863584255, 139768863625215,
+STORE, 139768861487104, 139768863584255,
+STORE, 139768863584256, 139768863625215,
+SNULL, 139768863584256, 139768863608831,
+STORE, 139768863608832, 139768863625215,
+STORE, 139768863584256, 139768863608831,
+ERASE, 139768863584256, 139768863608831,
+STORE, 139768863584256, 139768863608831,
+ERASE, 139768863608832, 139768863625215,
+STORE, 139768863608832, 139768863625215,
+STORE, 139768867934208, 139768867950591,
+SNULL, 139768863600639, 139768863608831,
+STORE, 139768863584256, 139768863600639,
+STORE, 139768863600640, 139768863608831,
+SNULL, 139768865734655, 139768865738751,
+STORE, 139768865730560, 139768865734655,
+STORE, 139768865734656, 139768865738751,
+SNULL, 94865905291263, 94865905299455,
+STORE, 94865905287168, 94865905291263,
+STORE, 94865905291264, 94865905299455,
+SNULL, 139768867983359, 139768867987455,
+STORE, 139768867979264, 139768867983359,
+STORE, 139768867983360, 139768867987455,
+ERASE, 139768867950592, 139768867979263,
+STORE, 94865923670016, 94865923805183,
+STORE, 139768866250752, 139768867934207,
+STORE, 94865923670016, 94865923940351,
+STORE, 94865923670016, 94865924075519,
+STORE, 94865923670016, 94865924222975,
+SNULL, 94865924210687, 94865924222975,
+STORE, 94865923670016, 94865924210687,
+STORE, 94865924210688, 94865924222975,
+ERASE, 94865924210688, 94865924222975,
+STORE, 94865923670016, 94865924349951,
+STORE, 94865923670016, 94865924493311,
+STORE, 94865923670016, 94865924640767,
+SNULL, 94865924603903, 94865924640767,
+STORE, 94865923670016, 94865924603903,
+STORE, 94865924603904, 94865924640767,
+ERASE, 94865924603904, 94865924640767,
+STORE, 94865923670016, 94865924747263,
+STORE, 94865923670016, 94865924898815,
+SNULL, 94865924874239, 94865924898815,
+STORE, 94865923670016, 94865924874239,
+STORE, 94865924874240, 94865924898815,
+ERASE, 94865924874240, 94865924898815,
+STORE, 94865923670016, 94865925025791,
+SNULL, 94865925013503, 94865925025791,
+STORE, 94865923670016, 94865925013503,
+STORE, 94865925013504, 94865925025791,
+ERASE, 94865925013504, 94865925025791,
+SNULL, 94865924988927, 94865925013503,
+STORE, 94865923670016, 94865924988927,
+STORE, 94865924988928, 94865925013503,
+ERASE, 94865924988928, 94865925013503,
+STORE, 94865923670016, 94865925152767,
+SNULL, 94865925136383, 94865925152767,
+STORE, 94865923670016, 94865925136383,
+STORE, 94865925136384, 94865925152767,
+ERASE, 94865925136384, 94865925152767,
+STORE, 94865923670016, 94865925292031,
+SNULL, 94865925279743, 94865925292031,
+STORE, 94865923670016, 94865925279743,
+STORE, 94865925279744, 94865925292031,
+ERASE, 94865925279744, 94865925292031,
+SNULL, 94865925255167, 94865925279743,
+STORE, 94865923670016, 94865925255167,
+STORE, 94865925255168, 94865925279743,
+ERASE, 94865925255168, 94865925279743,
+STORE, 94865923670016, 94865925406719,
+SNULL, 94865925394431, 94865925406719,
+STORE, 94865923670016, 94865925394431,
+STORE, 94865925394432, 94865925406719,
+ERASE, 94865925394432, 94865925406719,
+STORE, 94865923670016, 94865925545983,
+SNULL, 94865925533695, 94865925545983,
+STORE, 94865923670016, 94865925533695,
+STORE, 94865925533696, 94865925545983,
+ERASE, 94865925533696, 94865925545983,
+SNULL, 94865925492735, 94865925533695,
+STORE, 94865923670016, 94865925492735,
+STORE, 94865925492736, 94865925533695,
+ERASE, 94865925492736, 94865925533695,
+STORE, 94865923670016, 94865925627903,
+SNULL, 94865925599231, 94865925627903,
+STORE, 94865923670016, 94865925599231,
+STORE, 94865925599232, 94865925627903,
+ERASE, 94865925599232, 94865925627903,
+STORE, 94865923670016, 94865925738495,
+SNULL, 94865925726207, 94865925738495,
+STORE, 94865923670016, 94865925726207,
+STORE, 94865925726208, 94865925738495,
+ERASE, 94865925726208, 94865925738495,
+STORE, 94865923670016, 94865925877759,
+SNULL, 94865925865471, 94865925877759,
+STORE, 94865923670016, 94865925865471,
+STORE, 94865925865472, 94865925877759,
+ERASE, 94865925865472, 94865925877759,
+STORE, 94865923670016, 94865926021119,
+SNULL, 94865926008831, 94865926021119,
+STORE, 94865923670016, 94865926008831,
+STORE, 94865926008832, 94865926021119,
+ERASE, 94865926008832, 94865926021119,
+SNULL, 94865925971967, 94865926008831,
+STORE, 94865923670016, 94865925971967,
+STORE, 94865925971968, 94865926008831,
+ERASE, 94865925971968, 94865926008831,
+STORE, 94865923670016, 94865926115327,
+STORE, 94865923670016, 94865926254591,
+SNULL, 94865926246399, 94865926254591,
+STORE, 94865923670016, 94865926246399,
+STORE, 94865926246400, 94865926254591,
+ERASE, 94865926246400, 94865926254591,
+STORE, 94865923670016, 94865926385663,
+STORE, 94865923670016, 94865926537215,
+STORE, 94865923670016, 94865926672383,
+STORE, 94865923670016, 94865926815743,
+STORE, 94865923670016, 94865926955007,
+STORE, 94865923670016, 94865927094271,
+STORE, 94865923670016, 94865927233535,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140731148435456, 140737488351231,
+SNULL, 140731148443647, 140737488351231,
+STORE, 140731148435456, 140731148443647,
+STORE, 140731148304384, 140731148443647,
+STORE, 94090775400448, 94090777735167,
+SNULL, 94090775613439, 94090777735167,
+STORE, 94090775400448, 94090775613439,
+STORE, 94090775613440, 94090777735167,
+ERASE, 94090775613440, 94090777735167,
+STORE, 94090777710592, 94090777722879,
+STORE, 94090777722880, 94090777735167,
+STORE, 140301090283520, 140301092536319,
+SNULL, 140301090426879, 140301092536319,
+STORE, 140301090283520, 140301090426879,
+STORE, 140301090426880, 140301092536319,
+ERASE, 140301090426880, 140301092536319,
+STORE, 140301092524032, 140301092532223,
+STORE, 140301092532224, 140301092536319,
+STORE, 140731148570624, 140731148574719,
+STORE, 140731148558336, 140731148570623,
+STORE, 140301092495360, 140301092524031,
+STORE, 140301092487168, 140301092495359,
+STORE, 140301088169984, 140301090283519,
+SNULL, 140301088169984, 140301088182271,
+STORE, 140301088182272, 140301090283519,
+STORE, 140301088169984, 140301088182271,
+SNULL, 140301090275327, 140301090283519,
+STORE, 140301088182272, 140301090275327,
+STORE, 140301090275328, 140301090283519,
+ERASE, 140301090275328, 140301090283519,
+STORE, 140301090275328, 140301090283519,
+STORE, 140301084372992, 140301088169983,
+SNULL, 140301084372992, 140301086031871,
+STORE, 140301086031872, 140301088169983,
+STORE, 140301084372992, 140301086031871,
+SNULL, 140301088129023, 140301088169983,
+STORE, 140301086031872, 140301088129023,
+STORE, 140301088129024, 140301088169983,
+SNULL, 140301088129024, 140301088153599,
+STORE, 140301088153600, 140301088169983,
+STORE, 140301088129024, 140301088153599,
+ERASE, 140301088129024, 140301088153599,
+STORE, 140301088129024, 140301088153599,
+ERASE, 140301088153600, 140301088169983,
+STORE, 140301088153600, 140301088169983,
+STORE, 140301092478976, 140301092495359,
+SNULL, 140301088145407, 140301088153599,
+STORE, 140301088129024, 140301088145407,
+STORE, 140301088145408, 140301088153599,
+SNULL, 140301090279423, 140301090283519,
+STORE, 140301090275328, 140301090279423,
+STORE, 140301090279424, 140301090283519,
+SNULL, 94090777714687, 94090777722879,
+STORE, 94090777710592, 94090777714687,
+STORE, 94090777714688, 94090777722879,
+SNULL, 140301092528127, 140301092532223,
+STORE, 140301092524032, 140301092528127,
+STORE, 140301092528128, 140301092532223,
+ERASE, 140301092495360, 140301092524031,
+STORE, 94090794590208, 94090794725375,
+STORE, 140301090795520, 140301092478975,
+STORE, 94090794590208, 94090794860543,
+STORE, 94090794590208, 94090794995711,
+STORE, 94090794590208, 94090795163647,
+SNULL, 94090795139071, 94090795163647,
+STORE, 94090794590208, 94090795139071,
+STORE, 94090795139072, 94090795163647,
+ERASE, 94090795139072, 94090795163647,
+STORE, 94090794590208, 94090795278335,
+STORE, 94090794590208, 94090795425791,
+SNULL, 94090795388927, 94090795425791,
+STORE, 94090794590208, 94090795388927,
+STORE, 94090795388928, 94090795425791,
+ERASE, 94090795388928, 94090795425791,
+STORE, 94090794590208, 94090795528191,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733084430336, 140737488351231,
+SNULL, 140733084438527, 140737488351231,
+STORE, 140733084430336, 140733084438527,
+STORE, 140733084299264, 140733084438527,
+STORE, 94116169183232, 94116171517951,
+SNULL, 94116169396223, 94116171517951,
+STORE, 94116169183232, 94116169396223,
+STORE, 94116169396224, 94116171517951,
+ERASE, 94116169396224, 94116171517951,
+STORE, 94116171493376, 94116171505663,
+STORE, 94116171505664, 94116171517951,
+STORE, 139772214128640, 139772216381439,
+SNULL, 139772214271999, 139772216381439,
+STORE, 139772214128640, 139772214271999,
+STORE, 139772214272000, 139772216381439,
+ERASE, 139772214272000, 139772216381439,
+STORE, 139772216369152, 139772216377343,
+STORE, 139772216377344, 139772216381439,
+STORE, 140733085270016, 140733085274111,
+STORE, 140733085257728, 140733085270015,
+STORE, 139772216340480, 139772216369151,
+STORE, 139772216332288, 139772216340479,
+STORE, 139772212015104, 139772214128639,
+SNULL, 139772212015104, 139772212027391,
+STORE, 139772212027392, 139772214128639,
+STORE, 139772212015104, 139772212027391,
+SNULL, 139772214120447, 139772214128639,
+STORE, 139772212027392, 139772214120447,
+STORE, 139772214120448, 139772214128639,
+ERASE, 139772214120448, 139772214128639,
+STORE, 139772214120448, 139772214128639,
+STORE, 139772208218112, 139772212015103,
+SNULL, 139772208218112, 139772209876991,
+STORE, 139772209876992, 139772212015103,
+STORE, 139772208218112, 139772209876991,
+SNULL, 139772211974143, 139772212015103,
+STORE, 139772209876992, 139772211974143,
+STORE, 139772211974144, 139772212015103,
+SNULL, 139772211974144, 139772211998719,
+STORE, 139772211998720, 139772212015103,
+STORE, 139772211974144, 139772211998719,
+ERASE, 139772211974144, 139772211998719,
+STORE, 139772211974144, 139772211998719,
+ERASE, 139772211998720, 139772212015103,
+STORE, 139772211998720, 139772212015103,
+STORE, 139772216324096, 139772216340479,
+SNULL, 139772211990527, 139772211998719,
+STORE, 139772211974144, 139772211990527,
+STORE, 139772211990528, 139772211998719,
+SNULL, 139772214124543, 139772214128639,
+STORE, 139772214120448, 139772214124543,
+STORE, 139772214124544, 139772214128639,
+SNULL, 94116171497471, 94116171505663,
+STORE, 94116171493376, 94116171497471,
+STORE, 94116171497472, 94116171505663,
+SNULL, 139772216373247, 139772216377343,
+STORE, 139772216369152, 139772216373247,
+STORE, 139772216373248, 139772216377343,
+ERASE, 139772216340480, 139772216369151,
+STORE, 94116199383040, 94116199518207,
+STORE, 139772214640640, 139772216324095,
+STORE, 94116199383040, 94116199653375,
+STORE, 94116199383040, 94116199788543,
+STORE, 140737488347136, 140737488351231,
+STORE, 140726067826688, 140737488351231,
+SNULL, 140726067830783, 140737488351231,
+STORE, 140726067826688, 140726067830783,
+STORE, 140726067695616, 140726067830783,
+STORE, 94535150673920, 94535152898047,
+SNULL, 94535150784511, 94535152898047,
+STORE, 94535150673920, 94535150784511,
+STORE, 94535150784512, 94535152898047,
+ERASE, 94535150784512, 94535152898047,
+STORE, 94535152877568, 94535152889855,
+STORE, 94535152889856, 94535152898047,
+STORE, 140381257314304, 140381259567103,
+SNULL, 140381257457663, 140381259567103,
+STORE, 140381257314304, 140381257457663,
+STORE, 140381257457664, 140381259567103,
+ERASE, 140381257457664, 140381259567103,
+STORE, 140381259554816, 140381259563007,
+STORE, 140381259563008, 140381259567103,
+STORE, 140726068060160, 140726068064255,
+STORE, 140726068047872, 140726068060159,
+STORE, 140381259526144, 140381259554815,
+STORE, 140381259517952, 140381259526143,
+STORE, 140381253517312, 140381257314303,
+SNULL, 140381253517312, 140381255176191,
+STORE, 140381255176192, 140381257314303,
+STORE, 140381253517312, 140381255176191,
+SNULL, 140381257273343, 140381257314303,
+STORE, 140381255176192, 140381257273343,
+STORE, 140381257273344, 140381257314303,
+SNULL, 140381257273344, 140381257297919,
+STORE, 140381257297920, 140381257314303,
+STORE, 140381257273344, 140381257297919,
+ERASE, 140381257273344, 140381257297919,
+STORE, 140381257273344, 140381257297919,
+ERASE, 140381257297920, 140381257314303,
+STORE, 140381257297920, 140381257314303,
+SNULL, 140381257289727, 140381257297919,
+STORE, 140381257273344, 140381257289727,
+STORE, 140381257289728, 140381257297919,
+SNULL, 94535152885759, 94535152889855,
+STORE, 94535152877568, 94535152885759,
+STORE, 94535152885760, 94535152889855,
+SNULL, 140381259558911, 140381259563007,
+STORE, 140381259554816, 140381259558911,
+STORE, 140381259558912, 140381259563007,
+ERASE, 140381259526144, 140381259554815,
+STORE, 94535186296832, 94535186431999,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140729189425152, 140737488351231,
+SNULL, 140729189433343, 140737488351231,
+STORE, 140729189425152, 140729189433343,
+STORE, 140729189294080, 140729189433343,
+STORE, 94428200128512, 94428202352639,
+SNULL, 94428200239103, 94428202352639,
+STORE, 94428200128512, 94428200239103,
+STORE, 94428200239104, 94428202352639,
+ERASE, 94428200239104, 94428202352639,
+STORE, 94428202332160, 94428202344447,
+STORE, 94428202344448, 94428202352639,
+STORE, 139707216986112, 139707219238911,
+SNULL, 139707217129471, 139707219238911,
+STORE, 139707216986112, 139707217129471,
+STORE, 139707217129472, 139707219238911,
+ERASE, 139707217129472, 139707219238911,
+STORE, 139707219226624, 139707219234815,
+STORE, 139707219234816, 139707219238911,
+STORE, 140729189785600, 140729189789695,
+STORE, 140729189773312, 140729189785599,
+STORE, 139707219197952, 139707219226623,
+STORE, 139707219189760, 139707219197951,
+STORE, 139707213189120, 139707216986111,
+SNULL, 139707213189120, 139707214847999,
+STORE, 139707214848000, 139707216986111,
+STORE, 139707213189120, 139707214847999,
+SNULL, 139707216945151, 139707216986111,
+STORE, 139707214848000, 139707216945151,
+STORE, 139707216945152, 139707216986111,
+SNULL, 139707216945152, 139707216969727,
+STORE, 139707216969728, 139707216986111,
+STORE, 139707216945152, 139707216969727,
+ERASE, 139707216945152, 139707216969727,
+STORE, 139707216945152, 139707216969727,
+ERASE, 139707216969728, 139707216986111,
+STORE, 139707216969728, 139707216986111,
+SNULL, 139707216961535, 139707216969727,
+STORE, 139707216945152, 139707216961535,
+STORE, 139707216961536, 139707216969727,
+SNULL, 94428202340351, 94428202344447,
+STORE, 94428202332160, 94428202340351,
+STORE, 94428202340352, 94428202344447,
+SNULL, 139707219230719, 139707219234815,
+STORE, 139707219226624, 139707219230719,
+STORE, 139707219230720, 139707219234815,
+ERASE, 139707219197952, 139707219226623,
+STORE, 94428208599040, 94428208734207,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722000953344, 140737488351231,
+SNULL, 140722000961535, 140737488351231,
+STORE, 140722000953344, 140722000961535,
+STORE, 140722000822272, 140722000961535,
+STORE, 94636494757888, 94636496982015,
+SNULL, 94636494868479, 94636496982015,
+STORE, 94636494757888, 94636494868479,
+STORE, 94636494868480, 94636496982015,
+ERASE, 94636494868480, 94636496982015,
+STORE, 94636496961536, 94636496973823,
+STORE, 94636496973824, 94636496982015,
+STORE, 140142275100672, 140142277353471,
+SNULL, 140142275244031, 140142277353471,
+STORE, 140142275100672, 140142275244031,
+STORE, 140142275244032, 140142277353471,
+ERASE, 140142275244032, 140142277353471,
+STORE, 140142277341184, 140142277349375,
+STORE, 140142277349376, 140142277353471,
+STORE, 140722002747392, 140722002751487,
+STORE, 140722002735104, 140722002747391,
+STORE, 140142277312512, 140142277341183,
+STORE, 140142277304320, 140142277312511,
+STORE, 140142271303680, 140142275100671,
+SNULL, 140142271303680, 140142272962559,
+STORE, 140142272962560, 140142275100671,
+STORE, 140142271303680, 140142272962559,
+SNULL, 140142275059711, 140142275100671,
+STORE, 140142272962560, 140142275059711,
+STORE, 140142275059712, 140142275100671,
+SNULL, 140142275059712, 140142275084287,
+STORE, 140142275084288, 140142275100671,
+STORE, 140142275059712, 140142275084287,
+ERASE, 140142275059712, 140142275084287,
+STORE, 140142275059712, 140142275084287,
+ERASE, 140142275084288, 140142275100671,
+STORE, 140142275084288, 140142275100671,
+SNULL, 140142275076095, 140142275084287,
+STORE, 140142275059712, 140142275076095,
+STORE, 140142275076096, 140142275084287,
+SNULL, 94636496969727, 94636496973823,
+STORE, 94636496961536, 94636496969727,
+STORE, 94636496969728, 94636496973823,
+SNULL, 140142277345279, 140142277349375,
+STORE, 140142277341184, 140142277345279,
+STORE, 140142277345280, 140142277349375,
+ERASE, 140142277312512, 140142277341183,
+STORE, 94636516286464, 94636516421631,
+STORE, 94071103692800, 94071103905791,
+STORE, 94071106002944, 94071106007039,
+STORE, 94071106007040, 94071106015231,
+STORE, 94071106015232, 94071106027519,
+STORE, 94071138521088, 94071140368383,
+STORE, 140145668190208, 140145669849087,
+STORE, 140145669849088, 140145671946239,
+STORE, 140145671946240, 140145671962623,
+STORE, 140145671962624, 140145671970815,
+STORE, 140145671970816, 140145671987199,
+STORE, 140145671987200, 140145671999487,
+STORE, 140145671999488, 140145674092543,
+STORE, 140145674092544, 140145674096639,
+STORE, 140145674096640, 140145674100735,
+STORE, 140145674100736, 140145674244095,
+STORE, 140145674612736, 140145676296191,
+STORE, 140145676296192, 140145676312575,
+STORE, 140145676341248, 140145676345343,
+STORE, 140145676345344, 140145676349439,
+STORE, 140145676349440, 140145676353535,
+STORE, 140734927740928, 140734927880191,
+STORE, 140734928842752, 140734928855039,
+STORE, 140734928855040, 140734928859135,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722342535168, 140737488351231,
+SNULL, 140722342543359, 140737488351231,
+STORE, 140722342535168, 140722342543359,
+STORE, 140722342404096, 140722342543359,
+STORE, 94399699714048, 94399702048767,
+SNULL, 94399699927039, 94399702048767,
+STORE, 94399699714048, 94399699927039,
+STORE, 94399699927040, 94399702048767,
+ERASE, 94399699927040, 94399702048767,
+STORE, 94399702024192, 94399702036479,
+STORE, 94399702036480, 94399702048767,
+STORE, 139811024748544, 139811027001343,
+SNULL, 139811024891903, 139811027001343,
+STORE, 139811024748544, 139811024891903,
+STORE, 139811024891904, 139811027001343,
+ERASE, 139811024891904, 139811027001343,
+STORE, 139811026989056, 139811026997247,
+STORE, 139811026997248, 139811027001343,
+STORE, 140722342707200, 140722342711295,
+STORE, 140722342694912, 140722342707199,
+STORE, 139811026960384, 139811026989055,
+STORE, 139811026952192, 139811026960383,
+STORE, 139811022635008, 139811024748543,
+SNULL, 139811022635008, 139811022647295,
+STORE, 139811022647296, 139811024748543,
+STORE, 139811022635008, 139811022647295,
+SNULL, 139811024740351, 139811024748543,
+STORE, 139811022647296, 139811024740351,
+STORE, 139811024740352, 139811024748543,
+ERASE, 139811024740352, 139811024748543,
+STORE, 139811024740352, 139811024748543,
+STORE, 139811018838016, 139811022635007,
+SNULL, 139811018838016, 139811020496895,
+STORE, 139811020496896, 139811022635007,
+STORE, 139811018838016, 139811020496895,
+SNULL, 139811022594047, 139811022635007,
+STORE, 139811020496896, 139811022594047,
+STORE, 139811022594048, 139811022635007,
+SNULL, 139811022594048, 139811022618623,
+STORE, 139811022618624, 139811022635007,
+STORE, 139811022594048, 139811022618623,
+ERASE, 139811022594048, 139811022618623,
+STORE, 139811022594048, 139811022618623,
+ERASE, 139811022618624, 139811022635007,
+STORE, 139811022618624, 139811022635007,
+STORE, 139811026944000, 139811026960383,
+SNULL, 139811022610431, 139811022618623,
+STORE, 139811022594048, 139811022610431,
+STORE, 139811022610432, 139811022618623,
+SNULL, 139811024744447, 139811024748543,
+STORE, 139811024740352, 139811024744447,
+STORE, 139811024744448, 139811024748543,
+SNULL, 94399702028287, 94399702036479,
+STORE, 94399702024192, 94399702028287,
+STORE, 94399702028288, 94399702036479,
+SNULL, 139811026993151, 139811026997247,
+STORE, 139811026989056, 139811026993151,
+STORE, 139811026993152, 139811026997247,
+ERASE, 139811026960384, 139811026989055,
+STORE, 94399723880448, 94399724015615,
+STORE, 139811025260544, 139811026943999,
+STORE, 94399723880448, 94399724150783,
+STORE, 94399723880448, 94399724285951,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140735364939776, 140737488351231,
+SNULL, 140735364947967, 140737488351231,
+STORE, 140735364939776, 140735364947967,
+STORE, 140735364808704, 140735364947967,
+STORE, 94421528674304, 94421531009023,
+SNULL, 94421528887295, 94421531009023,
+STORE, 94421528674304, 94421528887295,
+STORE, 94421528887296, 94421531009023,
+ERASE, 94421528887296, 94421531009023,
+STORE, 94421530984448, 94421530996735,
+STORE, 94421530996736, 94421531009023,
+STORE, 140162004742144, 140162006994943,
+SNULL, 140162004885503, 140162006994943,
+STORE, 140162004742144, 140162004885503,
+STORE, 140162004885504, 140162006994943,
+ERASE, 140162004885504, 140162006994943,
+STORE, 140162006982656, 140162006990847,
+STORE, 140162006990848, 140162006994943,
+STORE, 140735365402624, 140735365406719,
+STORE, 140735365390336, 140735365402623,
+STORE, 140162006953984, 140162006982655,
+STORE, 140162006945792, 140162006953983,
+STORE, 140162002628608, 140162004742143,
+SNULL, 140162002628608, 140162002640895,
+STORE, 140162002640896, 140162004742143,
+STORE, 140162002628608, 140162002640895,
+SNULL, 140162004733951, 140162004742143,
+STORE, 140162002640896, 140162004733951,
+STORE, 140162004733952, 140162004742143,
+ERASE, 140162004733952, 140162004742143,
+STORE, 140162004733952, 140162004742143,
+STORE, 140161998831616, 140162002628607,
+SNULL, 140161998831616, 140162000490495,
+STORE, 140162000490496, 140162002628607,
+STORE, 140161998831616, 140162000490495,
+SNULL, 140162002587647, 140162002628607,
+STORE, 140162000490496, 140162002587647,
+STORE, 140162002587648, 140162002628607,
+SNULL, 140162002587648, 140162002612223,
+STORE, 140162002612224, 140162002628607,
+STORE, 140162002587648, 140162002612223,
+ERASE, 140162002587648, 140162002612223,
+STORE, 140162002587648, 140162002612223,
+ERASE, 140162002612224, 140162002628607,
+STORE, 140162002612224, 140162002628607,
+STORE, 140162006937600, 140162006953983,
+SNULL, 140162002604031, 140162002612223,
+STORE, 140162002587648, 140162002604031,
+STORE, 140162002604032, 140162002612223,
+SNULL, 140162004738047, 140162004742143,
+STORE, 140162004733952, 140162004738047,
+STORE, 140162004738048, 140162004742143,
+SNULL, 94421530988543, 94421530996735,
+STORE, 94421530984448, 94421530988543,
+STORE, 94421530988544, 94421530996735,
+SNULL, 140162006986751, 140162006990847,
+STORE, 140162006982656, 140162006986751,
+STORE, 140162006986752, 140162006990847,
+ERASE, 140162006953984, 140162006982655,
+STORE, 94421551697920, 94421551833087,
+STORE, 140162005254144, 140162006937599,
+STORE, 94421551697920, 94421551968255,
+STORE, 94421551697920, 94421552103423,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140733498486784, 140737488351231,
+SNULL, 140733498494975, 140737488351231,
+STORE, 140733498486784, 140733498494975,
+STORE, 140733498355712, 140733498494975,
+STORE, 94567985836032, 94567988170751,
+SNULL, 94567986049023, 94567988170751,
+STORE, 94567985836032, 94567986049023,
+STORE, 94567986049024, 94567988170751,
+ERASE, 94567986049024, 94567988170751,
+STORE, 94567988146176, 94567988158463,
+STORE, 94567988158464, 94567988170751,
+STORE, 139634278572032, 139634280824831,
+SNULL, 139634278715391, 139634280824831,
+STORE, 139634278572032, 139634278715391,
+STORE, 139634278715392, 139634280824831,
+ERASE, 139634278715392, 139634280824831,
+STORE, 139634280812544, 139634280820735,
+STORE, 139634280820736, 139634280824831,
+STORE, 140733498544128, 140733498548223,
+STORE, 140733498531840, 140733498544127,
+STORE, 139634280783872, 139634280812543,
+STORE, 139634280775680, 139634280783871,
+STORE, 139634276458496, 139634278572031,
+SNULL, 139634276458496, 139634276470783,
+STORE, 139634276470784, 139634278572031,
+STORE, 139634276458496, 139634276470783,
+SNULL, 139634278563839, 139634278572031,
+STORE, 139634276470784, 139634278563839,
+STORE, 139634278563840, 139634278572031,
+ERASE, 139634278563840, 139634278572031,
+STORE, 139634278563840, 139634278572031,
+STORE, 139634272661504, 139634276458495,
+SNULL, 139634272661504, 139634274320383,
+STORE, 139634274320384, 139634276458495,
+STORE, 139634272661504, 139634274320383,
+SNULL, 139634276417535, 139634276458495,
+STORE, 139634274320384, 139634276417535,
+STORE, 139634276417536, 139634276458495,
+SNULL, 139634276417536, 139634276442111,
+STORE, 139634276442112, 139634276458495,
+STORE, 139634276417536, 139634276442111,
+ERASE, 139634276417536, 139634276442111,
+STORE, 139634276417536, 139634276442111,
+ERASE, 139634276442112, 139634276458495,
+STORE, 139634276442112, 139634276458495,
+STORE, 139634280767488, 139634280783871,
+SNULL, 139634276433919, 139634276442111,
+STORE, 139634276417536, 139634276433919,
+STORE, 139634276433920, 139634276442111,
+SNULL, 139634278567935, 139634278572031,
+STORE, 139634278563840, 139634278567935,
+STORE, 139634278567936, 139634278572031,
+SNULL, 94567988150271, 94567988158463,
+STORE, 94567988146176, 94567988150271,
+STORE, 94567988150272, 94567988158463,
+SNULL, 139634280816639, 139634280820735,
+STORE, 139634280812544, 139634280816639,
+STORE, 139634280816640, 139634280820735,
+ERASE, 139634280783872, 139634280812543,
+STORE, 94567996379136, 94567996514303,
+STORE, 139634279084032, 139634280767487,
+STORE, 94567996379136, 94567996649471,
+STORE, 94567996379136, 94567996784639,
+STORE, 94567996379136, 94567996960767,
+SNULL, 94567996932095, 94567996960767,
+STORE, 94567996379136, 94567996932095,
+STORE, 94567996932096, 94567996960767,
+ERASE, 94567996932096, 94567996960767,
+STORE, 94567996379136, 94567997071359,
+STORE, 94567996379136, 94567997206527,
+SNULL, 94567997186047, 94567997206527,
+STORE, 94567996379136, 94567997186047,
+STORE, 94567997186048, 94567997206527,
+ERASE, 94567997186048, 94567997206527,
+STORE, 94567996379136, 94567997358079,
+STORE, 94567996379136, 94567997493247,
+SNULL, 94567997476863, 94567997493247,
+STORE, 94567996379136, 94567997476863,
+STORE, 94567997476864, 94567997493247,
+ERASE, 94567997476864, 94567997493247,
+STORE, 94567996379136, 94567997612031,
+STORE, 94567996379136, 94567997767679,
+SNULL, 94567997739007, 94567997767679,
+STORE, 94567996379136, 94567997739007,
+STORE, 94567997739008, 94567997767679,
+ERASE, 94567997739008, 94567997767679,
+SNULL, 94567997698047, 94567997739007,
+STORE, 94567996379136, 94567997698047,
+STORE, 94567997698048, 94567997739007,
+ERASE, 94567997698048, 94567997739007,
+STORE, 94567996379136, 94567997853695,
+STORE, 94567996379136, 94567997988863,
+STORE, 94567996379136, 94567998132223,
+STORE, 94567996379136, 94567998275583,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140723667759104, 140737488351231,
+SNULL, 140723667767295, 140737488351231,
+STORE, 140723667759104, 140723667767295,
+STORE, 140723667628032, 140723667767295,
+STORE, 94231598800896, 94231601135615,
+SNULL, 94231599013887, 94231601135615,
+STORE, 94231598800896, 94231599013887,
+STORE, 94231599013888, 94231601135615,
+ERASE, 94231599013888, 94231601135615,
+STORE, 94231601111040, 94231601123327,
+STORE, 94231601123328, 94231601135615,
+STORE, 140269472649216, 140269474902015,
+SNULL, 140269472792575, 140269474902015,
+STORE, 140269472649216, 140269472792575,
+STORE, 140269472792576, 140269474902015,
+ERASE, 140269472792576, 140269474902015,
+STORE, 140269474889728, 140269474897919,
+STORE, 140269474897920, 140269474902015,
+STORE, 140723667836928, 140723667841023,
+STORE, 140723667824640, 140723667836927,
+STORE, 140269474861056, 140269474889727,
+STORE, 140269474852864, 140269474861055,
+STORE, 140269470535680, 140269472649215,
+SNULL, 140269470535680, 140269470547967,
+STORE, 140269470547968, 140269472649215,
+STORE, 140269470535680, 140269470547967,
+SNULL, 140269472641023, 140269472649215,
+STORE, 140269470547968, 140269472641023,
+STORE, 140269472641024, 140269472649215,
+ERASE, 140269472641024, 140269472649215,
+STORE, 140269472641024, 140269472649215,
+STORE, 140269466738688, 140269470535679,
+SNULL, 140269466738688, 140269468397567,
+STORE, 140269468397568, 140269470535679,
+STORE, 140269466738688, 140269468397567,
+SNULL, 140269470494719, 140269470535679,
+STORE, 140269468397568, 140269470494719,
+STORE, 140269470494720, 140269470535679,
+SNULL, 140269470494720, 140269470519295,
+STORE, 140269470519296, 140269470535679,
+STORE, 140269470494720, 140269470519295,
+ERASE, 140269470494720, 140269470519295,
+STORE, 140269470494720, 140269470519295,
+ERASE, 140269470519296, 140269470535679,
+STORE, 140269470519296, 140269470535679,
+STORE, 140269474844672, 140269474861055,
+SNULL, 140269470511103, 140269470519295,
+STORE, 140269470494720, 140269470511103,
+STORE, 140269470511104, 140269470519295,
+SNULL, 140269472645119, 140269472649215,
+STORE, 140269472641024, 140269472645119,
+STORE, 140269472645120, 140269472649215,
+SNULL, 94231601115135, 94231601123327,
+STORE, 94231601111040, 94231601115135,
+STORE, 94231601115136, 94231601123327,
+SNULL, 140269474893823, 140269474897919,
+STORE, 140269474889728, 140269474893823,
+STORE, 140269474893824, 140269474897919,
+ERASE, 140269474861056, 140269474889727,
+STORE, 94231626592256, 94231626727423,
+STORE, 140269473161216, 140269474844671,
+STORE, 94231626592256, 94231626862591,
+STORE, 94231626592256, 94231626997759,
+STORE, 94327178862592, 94327179075583,
+STORE, 94327181172736, 94327181176831,
+STORE, 94327181176832, 94327181185023,
+STORE, 94327181185024, 94327181197311,
+STORE, 94327185715200, 94327186685951,
+STORE, 140172071755776, 140172073414655,
+STORE, 140172073414656, 140172075511807,
+STORE, 140172075511808, 140172075528191,
+STORE, 140172075528192, 140172075536383,
+STORE, 140172075536384, 140172075552767,
+STORE, 140172075552768, 140172075565055,
+STORE, 140172075565056, 140172077658111,
+STORE, 140172077658112, 140172077662207,
+STORE, 140172077662208, 140172077666303,
+STORE, 140172077666304, 140172077809663,
+STORE, 140172078178304, 140172079861759,
+STORE, 140172079861760, 140172079878143,
+STORE, 140172079878144, 140172079906815,
+STORE, 140172079906816, 140172079910911,
+STORE, 140172079910912, 140172079915007,
+STORE, 140172079915008, 140172079919103,
+STORE, 140720358359040, 140720358494207,
+STORE, 140720358498304, 140720358510591,
+STORE, 140720358510592, 140720358514687,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140722548621312, 140737488351231,
+SNULL, 140722548629503, 140737488351231,
+STORE, 140722548621312, 140722548629503,
+STORE, 140722548490240, 140722548629503,
+STORE, 93949289504768, 93949291728895,
+SNULL, 93949289615359, 93949291728895,
+STORE, 93949289504768, 93949289615359,
+STORE, 93949289615360, 93949291728895,
+ERASE, 93949289615360, 93949291728895,
+STORE, 93949291708416, 93949291720703,
+STORE, 93949291720704, 93949291728895,
+STORE, 140305861902336, 140305864155135,
+SNULL, 140305862045695, 140305864155135,
+STORE, 140305861902336, 140305862045695,
+STORE, 140305862045696, 140305864155135,
+ERASE, 140305862045696, 140305864155135,
+STORE, 140305864142848, 140305864151039,
+STORE, 140305864151040, 140305864155135,
+STORE, 140722549821440, 140722549825535,
+STORE, 140722549809152, 140722549821439,
+STORE, 140305864114176, 140305864142847,
+STORE, 140305864105984, 140305864114175,
+STORE, 140305858105344, 140305861902335,
+SNULL, 140305858105344, 140305859764223,
+STORE, 140305859764224, 140305861902335,
+STORE, 140305858105344, 140305859764223,
+SNULL, 140305861861375, 140305861902335,
+STORE, 140305859764224, 140305861861375,
+STORE, 140305861861376, 140305861902335,
+SNULL, 140305861861376, 140305861885951,
+STORE, 140305861885952, 140305861902335,
+STORE, 140305861861376, 140305861885951,
+ERASE, 140305861861376, 140305861885951,
+STORE, 140305861861376, 140305861885951,
+ERASE, 140305861885952, 140305861902335,
+STORE, 140305861885952, 140305861902335,
+SNULL, 140305861877759, 140305861885951,
+STORE, 140305861861376, 140305861877759,
+STORE, 140305861877760, 140305861885951,
+SNULL, 93949291716607, 93949291720703,
+STORE, 93949291708416, 93949291716607,
+STORE, 93949291716608, 93949291720703,
+SNULL, 140305864146943, 140305864151039,
+STORE, 140305864142848, 140305864146943,
+STORE, 140305864146944, 140305864151039,
+ERASE, 140305864114176, 140305864142847,
+STORE, 93949324136448, 93949324271615,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140725754908672, 140737488351231,
+SNULL, 140725754916863, 140737488351231,
+STORE, 140725754908672, 140725754916863,
+STORE, 140725754777600, 140725754916863,
+STORE, 94831184375808, 94831186599935,
+SNULL, 94831184486399, 94831186599935,
+STORE, 94831184375808, 94831184486399,
+STORE, 94831184486400, 94831186599935,
+ERASE, 94831184486400, 94831186599935,
+STORE, 94831186579456, 94831186591743,
+STORE, 94831186591744, 94831186599935,
+STORE, 140605482479616, 140605484732415,
+SNULL, 140605482622975, 140605484732415,
+STORE, 140605482479616, 140605482622975,
+STORE, 140605482622976, 140605484732415,
+ERASE, 140605482622976, 140605484732415,
+STORE, 140605484720128, 140605484728319,
+STORE, 140605484728320, 140605484732415,
+STORE, 140725755670528, 140725755674623,
+STORE, 140725755658240, 140725755670527,
+STORE, 140605484691456, 140605484720127,
+STORE, 140605484683264, 140605484691455,
+STORE, 140605478682624, 140605482479615,
+SNULL, 140605478682624, 140605480341503,
+STORE, 140605480341504, 140605482479615,
+STORE, 140605478682624, 140605480341503,
+SNULL, 140605482438655, 140605482479615,
+STORE, 140605480341504, 140605482438655,
+STORE, 140605482438656, 140605482479615,
+SNULL, 140605482438656, 140605482463231,
+STORE, 140605482463232, 140605482479615,
+STORE, 140605482438656, 140605482463231,
+ERASE, 140605482438656, 140605482463231,
+STORE, 140605482438656, 140605482463231,
+ERASE, 140605482463232, 140605482479615,
+STORE, 140605482463232, 140605482479615,
+SNULL, 140605482455039, 140605482463231,
+STORE, 140605482438656, 140605482455039,
+STORE, 140605482455040, 140605482463231,
+SNULL, 94831186587647, 94831186591743,
+STORE, 94831186579456, 94831186587647,
+STORE, 94831186587648, 94831186591743,
+SNULL, 140605484724223, 140605484728319,
+STORE, 140605484720128, 140605484724223,
+STORE, 140605484724224, 140605484728319,
+ERASE, 140605484691456, 140605484720127,
+STORE, 94831217156096, 94831217291263,
+STORE, 94327178862592, 94327179075583,
+STORE, 94327181172736, 94327181176831,
+STORE, 94327181176832, 94327181185023,
+STORE, 94327181185024, 94327181197311,
+STORE, 94327185715200, 94327186685951,
+STORE, 140172071755776, 140172073414655,
+STORE, 140172073414656, 140172075511807,
+STORE, 140172075511808, 140172075528191,
+STORE, 140172075528192, 140172075536383,
+STORE, 140172075536384, 140172075552767,
+STORE, 140172075552768, 140172075565055,
+STORE, 140172075565056, 140172077658111,
+STORE, 140172077658112, 140172077662207,
+STORE, 140172077662208, 140172077666303,
+STORE, 140172077666304, 140172077809663,
+STORE, 140172078178304, 140172079861759,
+STORE, 140172079861760, 140172079878143,
+STORE, 140172079878144, 140172079906815,
+STORE, 140172079906816, 140172079910911,
+STORE, 140172079910912, 140172079915007,
+STORE, 140172079915008, 140172079919103,
+STORE, 140720358359040, 140720358494207,
+STORE, 140720358498304, 140720358510591,
+STORE, 140720358510592, 140720358514687,
+STORE, 140737488347136, 140737488351231,
+STORE, 140737488343040, 140737488351231,
+STORE, 140737488338944, 140737488351231,
+STORE, 140734529933312, 140737488351231,
+SNULL, 140734529945599, 140737488351231,
+STORE, 140734529933312, 140734529945599,
+STORE, 140734529802240, 140734529945599,
+STORE, 4194304, 26279935,
+STORE, 28372992, 28454911,
+STORE, 28454912, 29806591,
+STORE, 140249744060416, 140249746313215,
+SNULL, 140249744203775, 140249746313215,
+STORE, 140249744060416, 140249744203775,
+STORE, 140249744203776, 140249746313215,
+ERASE, 140249744203776, 140249746313215,
+STORE, 140249746300928, 140249746309119,
+STORE, 140249746309120, 140249746313215,
+STORE, 140734530174976, 140734530179071,
+STORE, 140734530162688, 140734530174975,
+STORE, 140249746272256, 140249746300927,
+STORE, 140249746264064, 140249746272255,
+STORE, 140249740226560, 140249744060415,
+SNULL, 140249740226560, 140249741934591,
+STORE, 140249741934592, 140249744060415,
+STORE, 140249740226560, 140249741934591,
+SNULL, 140249744027647, 140249744060415,
+STORE, 140249741934592, 140249744027647,
+STORE, 140249744027648, 140249744060415,
+ERASE, 140249744027648, 140249744060415,
+STORE, 140249744027648, 140249744060415,
+STORE, 140249738031104, 140249740226559,
+SNULL, 140249738031104, 140249738125311,
+STORE, 140249738125312, 140249740226559,
+STORE, 140249738031104, 140249738125311,
+SNULL, 140249740218367, 140249740226559,
+STORE, 140249738125312, 140249740218367,
+STORE, 140249740218368, 140249740226559,
+ERASE, 140249740218368, 140249740226559,
+STORE, 140249740218368, 140249740226559,
+STORE, 140249735512064, 140249738031103,
+SNULL, 140249735512064, 140249735925759,
+STORE, 140249735925760, 140249738031103,
+STORE, 140249735512064, 140249735925759,
+SNULL, 140249738018815, 140249738031103,
+STORE, 140249735925760, 140249738018815,
+STORE, 140249738018816, 140249738031103,
+ERASE, 140249738018816, 140249738031103,
+STORE, 140249738018816, 140249738031103,
+STORE, 140249732878336, 140249735512063,
+SNULL, 140249732878336, 140249733406719,
+STORE, 140249733406720, 140249735512063,
+STORE, 140249732878336, 140249733406719,
+SNULL, 140249735503871, 140249735512063,
+STORE, 140249733406720, 140249735503871,
+STORE, 140249735503872, 140249735512063,
+ERASE, 140249735503872, 140249735512063,
+STORE, 140249735503872, 140249735512063,
+STORE, 140249730764800, 140249732878335,
+SNULL, 140249730764800, 140249730777087,
+STORE, 140249730777088, 140249732878335,
+STORE, 140249730764800, 140249730777087,
+SNULL, 140249732870143, 140249732878335,
+STORE, 140249730777088, 140249732870143,
+STORE, 140249732870144, 140249732878335,
+ERASE, 140249732870144, 140249732878335,
+STORE, 140249732870144, 140249732878335,
+STORE, 140249728561152, 140249730764799,
+SNULL, 140249728561152, 140249728663551,
+STORE, 140249728663552, 140249730764799,
+STORE, 140249728561152, 140249728663551,
+SNULL, 140249730756607, 140249730764799,
+STORE, 140249728663552, 140249730756607,
+STORE, 140249730756608, 140249730764799,
+ERASE, 140249730756608, 140249730764799,
+STORE, 140249730756608, 140249730764799,
+STORE, 140249746255872, 140249746272255,
+STORE, 140249725399040, 140249728561151,
+SNULL, 140249725399040, 140249726459903,
+STORE, 140249726459904, 140249728561151,
+STORE, 140249725399040, 140249726459903,
+SNULL, 140249728552959, 140249728561151,
+STORE, 140249726459904, 140249728552959,
+STORE, 140249728552960, 140249728561151,
+ERASE, 140249728552960, 140249728561151,
+STORE, 140249728552960, 140249728561151,
+STORE, 140249721602048, 140249725399039,
+SNULL, 140249721602048, 140249723260927,
+STORE, 140249723260928, 140249725399039,
+STORE, 140249721602048, 140249723260927,
+SNULL, 140249725358079, 140249725399039,
+STORE, 140249723260928, 140249725358079,
+STORE, 140249725358080, 140249725399039,
+SNULL, 140249725358080, 140249725382655,
+STORE, 140249725382656, 140249725399039,
+STORE, 140249725358080, 140249725382655,
+ERASE, 140249725358080, 140249725382655,
+STORE, 140249725358080, 140249725382655,
+ERASE, 140249725382656, 140249725399039,
+STORE, 140249725382656, 140249725399039,
+STORE, 140249746243584, 140249746272255,
+SNULL, 140249725374463, 140249725382655,
+STORE, 140249725358080, 140249725374463,
+STORE, 140249725374464, 140249725382655,
+SNULL, 140249728557055, 140249728561151,
+STORE, 140249728552960, 140249728557055,
+STORE, 140249728557056, 140249728561151,
+SNULL, 140249730760703, 140249730764799,
+STORE, 140249730756608, 140249730760703,
+STORE, 140249730760704, 140249730764799,
+SNULL, 140249732874239, 140249732878335,
+STORE, 140249732870144, 140249732874239,
+STORE, 140249732874240, 140249732878335,
+SNULL, 140249735507967, 140249735512063,
+STORE, 140249735503872, 140249735507967,
+STORE, 140249735507968, 140249735512063,
+SNULL, 140249738027007, 140249738031103,
+STORE, 140249738018816, 140249738027007,
+STORE, 140249738027008, 140249738031103,
+SNULL, 140249740222463, 140249740226559,
+STORE, 140249740218368, 140249740222463,
+STORE, 140249740222464, 140249740226559,
+SNULL, 140249744031743, 140249744060415,
+STORE, 140249744027648, 140249744031743,
+STORE, 140249744031744, 140249744060415,
+SNULL, 28405759, 28454911,
+STORE, 28372992, 28405759,
+STORE, 28405760, 28454911,
+SNULL, 140249746305023, 140249746309119,
+STORE, 140249746300928, 140249746305023,
+STORE, 140249746305024, 140249746309119,
+ERASE, 140249746272256, 140249746300927,
+STORE, 33853440, 33988607,
+STORE, 140249744560128, 140249746243583,
+STORE, 140249746296832, 140249746300927,
+STORE, 140249744424960, 140249744560127,
+STORE, 33853440, 34131967,
+STORE, 140249719504896, 140249721602047,
+STORE, 140249746288640, 140249746300927,
+STORE, 140249746280448, 140249746300927,
+STORE, 140249746243584, 140249746280447,
+STORE, 140249744408576, 140249744560127,
+STORE, 33853440, 34267135,
+STORE, 33853440, 34422783,
+STORE, 140249744400384, 140249744560127,
+STORE, 140249744392192, 140249744560127,
+STORE, 33853440, 34557951,
+STORE, 33853440, 34693119,
+STORE, 140249744375808, 140249744560127,
+STORE, 140249744367616, 140249744560127,
+STORE, 33853440, 34832383,
+STORE, 140249719230464, 140249721602047,
+STORE, 140249744207872, 140249744560127,
+STORE, 33853440, 34971647,
+SNULL, 34963455, 34971647,
+STORE, 33853440, 34963455,
+STORE, 34963456, 34971647,
+ERASE, 34963456, 34971647,
+SNULL, 34955263, 34963455,
+STORE, 33853440, 34955263,
+STORE, 34955264, 34963455,
+ERASE, 34955264, 34963455,
+SNULL, 34947071, 34955263,
+STORE, 33853440, 34947071,
+STORE, 34947072, 34955263,
+ERASE, 34947072, 34955263,
+SNULL, 34938879, 34947071,
+STORE, 33853440, 34938879,
+STORE, 34938880, 34947071,
+ERASE, 34938880, 34947071,
+STORE, 140249719214080, 140249721602047,
+STORE, 140249719148544, 140249721602047,
+STORE, 140249719115776, 140249721602047,
+STORE, 140249717018624, 140249721602047,
+STORE, 140249716953088, 140249721602047,
+STORE, 33853440, 35086335,
+STORE, 140249716822016, 140249721602047,
+STORE, 140249716559872, 140249721602047,
+STORE, 140249716551680, 140249721602047,
+STORE, 140249716535296, 140249721602047,
+STORE, 140249716527104, 140249721602047,
+STORE, 140249716518912, 140249721602047,
+STORE, 33853440, 35221503,
+SNULL, 35213311, 35221503,
+STORE, 33853440, 35213311,
+STORE, 35213312, 35221503,
+ERASE, 35213312, 35221503,
+SNULL, 35205119, 35213311,
+STORE, 33853440, 35205119,
+STORE, 35205120, 35213311,
+ERASE, 35205120, 35213311,
+SNULL, 35192831, 35205119,
+STORE, 33853440, 35192831,
+STORE, 35192832, 35205119,
+ERASE, 35192832, 35205119,
+SNULL, 35176447, 35192831,
+STORE, 33853440, 35176447,
+STORE, 35176448, 35192831,
+ERASE, 35176448, 35192831,
+STORE, 140249716502528, 140249721602047,
+STORE, 33853440, 35311615,
+SNULL, 35307519, 35311615,
+STORE, 33853440, 35307519,
+STORE, 35307520, 35311615,
+ERASE, 35307520, 35311615,
+SNULL, 35303423, 35307519,
+STORE, 33853440, 35303423,
+STORE, 35303424, 35307519,
+ERASE, 35303424, 35307519,
+SNULL, 35299327, 35303423,
+STORE, 33853440, 35299327,
+STORE, 35299328, 35303423,
+ERASE, 35299328, 35303423,
+SNULL, 35295231, 35299327,
+STORE, 33853440, 35295231,
+STORE, 35295232, 35299327,
+ERASE, 35295232, 35299327,
+SNULL, 35291135, 35295231,
+STORE, 33853440, 35291135,
+STORE, 35291136, 35295231,
+ERASE, 35291136, 35295231,
+SNULL, 35287039, 35291135,
+STORE, 33853440, 35287039,
+STORE, 35287040, 35291135,
+ERASE, 35287040, 35291135,
+SNULL, 35282943, 35287039,
+STORE, 33853440, 35282943,
+STORE, 35282944, 35287039,
+ERASE, 35282944, 35287039,
+STORE, 140249716486144, 140249721602047,
+STORE, 140249716453376, 140249721602047,
+STORE, 33853440, 35418111,
+SNULL, 35401727, 35418111,
+STORE, 33853440, 35401727,
+STORE, 35401728, 35418111,
+ERASE, 35401728, 35418111,
+SNULL, 35389439, 35401727,
+STORE, 33853440, 35389439,
+STORE, 35389440, 35401727,
+ERASE, 35389440, 35401727,
+STORE, 140249714356224, 140249721602047,
+STORE, 33853440, 35540991,
+STORE, 140249714339840, 140249721602047,
+STORE, 140249714077696, 140249721602047,
+STORE, 140249714069504, 140249721602047,
+STORE, 140249714061312, 140249721602047,
+STORE, 33853440, 35680255,
+SNULL, 35672063, 35680255,
+STORE, 33853440, 35672063,
+STORE, 35672064, 35680255,
+ERASE, 35672064, 35680255,
+SNULL, 35627007, 35672063,
+STORE, 33853440, 35627007,
+STORE, 35627008, 35672063,
+ERASE, 35627008, 35672063,
+STORE, 140249711964160, 140249721602047,
+STORE, 33853440, 35762175,
+SNULL, 35753983, 35762175,
+STORE, 33853440, 35753983,
+STORE, 35753984, 35762175,
+ERASE, 35753984, 35762175,
+SNULL, 35745791, 35753983,
+STORE, 33853440, 35745791,
+STORE, 35745792, 35753983,
+ERASE, 35745792, 35753983,
+STORE, 140249711955968, 140249721602047,
+STORE, 140249711947776, 140249721602047,
+STORE, 140249710899200, 140249721602047,
+STORE, 140249710866432, 140249721602047,
+STORE, 140249710600192, 140249721602047,
+SNULL, 140249744424959, 140249744560127,
+STORE, 140249744207872, 140249744424959,
+STORE, 140249744424960, 140249744560127,
+ERASE, 140249744424960, 140249744560127,
+STORE, 140249708503040, 140249721602047,
+STORE, 33853440, 35885055,
+STORE, 140249707978752, 140249721602047,
+STORE, 140249705881600, 140249721602047,
+STORE, 33853440, 36036607,
+STORE, 33853440, 36175871,
+STORE, 140249744551936, 140249744560127,
+STORE, 140249744543744, 140249744560127,
+STORE, 140249744535552, 140249744560127,
+STORE, 140249744527360, 140249744560127,
+STORE, 140249744519168, 140249744560127,
+STORE, 140249705619456, 140249721602047,
+STORE, 140249744510976, 140249744560127,
+STORE, 140249744502784, 140249744560127,
+STORE, 140249744494592, 140249744560127,
+STORE, 140249744486400, 140249744560127,
+STORE, 140249744478208, 140249744560127,
+STORE, 140249744470016, 140249744560127,
+STORE, 140249744461824, 140249744560127,
+STORE, 140249744453632, 140249744560127,
+STORE, 140249744445440, 140249744560127,
+STORE, 140249744437248, 140249744560127,
+STORE, 140249744429056, 140249744560127,
+STORE, 140249703522304, 140249721602047,
+STORE, 33853440, 36311039,
+STORE, 140249703489536, 140249721602047,
+STORE, 33853440, 36474879,
+STORE, 140249703456768, 140249721602047,
+STORE, 33853440, 36622335,
+STORE, 140249703424000, 140249721602047,
+STORE, 140249703391232, 140249721602047,
+STORE, 33853440, 36810751,
+STORE, 140249703358464, 140249721602047,
+STORE, 140249703325696, 140249721602047,
+SNULL, 36655103, 36810751,
+STORE, 33853440, 36655103,
+STORE, 36655104, 36810751,
+ERASE, 36655104, 36810751,
+SNULL, 36438015, 36655103,
+STORE, 33853440, 36438015,
+STORE, 36438016, 36655103,
+ERASE, 36438016, 36655103,
+STORE, 140249703317504, 140249721602047,
+STORE, 140249701220352, 140249721602047,
+STORE, 33853440, 36585471,
+STORE, 33853440, 36782079,
+STORE, 140249701212160, 140249721602047,
+STORE, 140249701203968, 140249721602047,
+STORE, 140249701195776, 140249721602047,
+STORE, 140249701187584, 140249721602047,
+STORE, 140249701179392, 140249721602047,
+STORE, 140249701171200, 140249721602047,
+STORE, 140249701163008, 140249721602047,
+STORE, 140249701154816, 140249721602047,
+STORE, 140249701146624, 140249721602047,
+STORE, 140249701138432, 140249721602047,
+STORE, 140249701130240, 140249721602047,
+STORE, 140249700081664, 140249721602047,
+STORE, 140249700073472, 140249721602047,
+STORE, 33853440, 36978687,
+STORE, 140249697976320, 140249721602047,
+STORE, 33853440, 37240831,
+STORE, 140249695879168, 140249721602047,
+STORE, 140249695870976, 140249721602047,
+STORE, 140249695862784, 140249721602047,
+STORE, 140249695854592, 140249721602047,
+STORE, 140249695326208, 140249721602047,
+SNULL, 140249710600191, 140249721602047,
+STORE, 140249695326208, 140249710600191,
+STORE, 140249710600192, 140249721602047,
+SNULL, 140249710600192, 140249710866431,
+STORE, 140249710866432, 140249721602047,
+STORE, 140249710600192, 140249710866431,
+ERASE, 140249710600192, 140249710866431,
+STORE, 140249691131904, 140249710600191,
+STORE, 33853440, 37474303,
+STORE, 140249710858240, 140249721602047,
+STORE, 140249710850048, 140249721602047,
+STORE, 140249710841856, 140249721602047,
+STORE, 140249710833664, 140249721602047,
+STORE, 140249710825472, 140249721602047,
+STORE, 140249710817280, 140249721602047,
+STORE, 140249710809088, 140249721602047,
+STORE, 140249710800896, 140249721602047,
+STORE, 140249710792704, 140249721602047,
+STORE, 140249710784512, 140249721602047,
+STORE, 140249710776320, 140249721602047,
+STORE, 140249710768128, 140249721602047,
+STORE, 140249710759936, 140249721602047,
+STORE, 140249710751744, 140249721602047,
+STORE, 140249710743552, 140249721602047,
+STORE, 140249710735360, 140249721602047,
+STORE, 140249689034752, 140249710600191,
+STORE, 140249710727168, 140249721602047,
+STORE, 140249686937600, 140249710600191,
+STORE, 33853440, 37867519,
+STORE, 140249684840448, 140249710600191,
+STORE, 140249710718976, 140249721602047,
+STORE, 140249682743296, 140249710600191,
+STORE, 140249710710784, 140249721602047,
+STORE, 140249710702592, 140249721602047,
+STORE, 140249710694400, 140249721602047,
+STORE, 140249710686208, 140249721602047,
+STORE, 140249710678016, 140249721602047,
+STORE, 140249682612224, 140249710600191,
+STORE, 140249682087936, 140249710600191,
+SNULL, 140249705619455, 140249710600191,
+STORE, 140249682087936, 140249705619455,
+STORE, 140249705619456, 140249710600191,
+SNULL, 140249705619456, 140249705881599,
+STORE, 140249705881600, 140249710600191,
+STORE, 140249705619456, 140249705881599,
+ERASE, 140249705619456, 140249705881599,
+STORE, 140249679990784, 140249705619455,
+STORE, 140249710669824, 140249721602047,
+STORE, 140249677893632, 140249705619455,
+STORE, 140249710653440, 140249721602047,
+STORE, 140249710645248, 140249721602047,
+STORE, 140249710637056, 140249721602047,
+STORE, 140249710628864, 140249721602047,
+STORE, 140249710620672, 140249721602047,
+STORE, 140249710612480, 140249721602047,
+STORE, 140249710604288, 140249721602047,
+STORE, 140249705873408, 140249710600191,
+STORE, 140249705865216, 140249710600191,
+STORE, 140249705857024, 140249710600191,
+STORE, 140249705848832, 140249710600191,
+STORE, 140249705840640, 140249710600191,
+STORE, 140249705832448, 140249710600191,
+STORE, 140249705824256, 140249710600191,
+STORE, 140249705816064, 140249710600191,
+STORE, 140249705807872, 140249710600191,
+STORE, 140249705799680, 140249710600191,
+STORE, 33853440, 38129663,
+SNULL, 140249744207872, 140249744367615,
+STORE, 140249744367616, 140249744424959,
+STORE, 140249744207872, 140249744367615,
+ERASE, 140249744207872, 140249744367615,
+STORE, 140249677606912, 140249705619455,
+STORE, 140249675509760, 140249705619455,
+SNULL, 140249677606911, 140249705619455,
+STORE, 140249675509760, 140249677606911,
+STORE, 140249677606912, 140249705619455,
+SNULL, 140249677606912, 140249677893631,
+STORE, 140249677893632, 140249705619455,
+STORE, 140249677606912, 140249677893631,
+ERASE, 140249677606912, 140249677893631,
+STORE, 140249744359424, 140249744424959,
+STORE, 33853440, 38391807,
+STORE, 140249674981376, 140249677606911,
+STORE, 140249672884224, 140249677606911,
+SNULL, 140249719230463, 140249721602047,
+STORE, 140249710604288, 140249719230463,
+STORE, 140249719230464, 140249721602047,
+SNULL, 140249719230464, 140249719504895,
+STORE, 140249719504896, 140249721602047,
+STORE, 140249719230464, 140249719504895,
+ERASE, 140249719230464, 140249719504895,
+STORE, 140249744351232, 140249744424959,
+STORE, 140249744343040, 140249744424959,
+STORE, 140249744334848, 140249744424959,
+STORE, 140249744326656, 140249744424959,
+STORE, 140249744310272, 140249744424959,
+STORE, 140249744302080, 140249744424959,
+STORE, 140249744285696, 140249744424959,
+STORE, 140249744277504, 140249744424959,
+STORE, 140249744261120, 140249744424959,
+STORE, 140249744252928, 140249744424959,
+STORE, 140249744220160, 140249744424959,
+STORE, 140249744211968, 140249744424959,
+STORE, 140249719488512, 140249721602047,
+STORE, 140249744203776, 140249744424959,
+STORE, 140249719472128, 140249721602047,
+STORE, 140249719463936, 140249721602047,
+STORE, 140249719447552, 140249721602047,
+STORE, 140249719439360, 140249721602047,
+STORE, 140249719406592, 140249721602047,
+STORE, 140249719398400, 140249721602047,
+STORE, 140249719382016, 140249721602047,
+STORE, 140249719373824, 140249721602047,
+STORE, 140249719357440, 140249721602047,
+STORE, 140249719349248, 140249721602047,
+STORE, 140249719332864, 140249721602047,
+STORE, 140249719324672, 140249721602047,
+STORE, 140249719291904, 140249721602047,
+STORE, 140249719283712, 140249721602047,
+STORE, 140249719267328, 140249721602047,
+STORE, 140249719259136, 140249721602047,
+STORE, 140249719242752, 140249721602047,
+STORE, 140249719234560, 140249721602047,
+STORE, 140249705783296, 140249710600191,
+STORE, 140249705775104, 140249710600191,
+STORE, 140249705742336, 140249710600191,
+STORE, 140249705734144, 140249710600191,
+STORE, 140249705717760, 140249710600191,
+STORE, 140249670787072, 140249677606911,
+STORE, 140249705709568, 140249710600191,
+STORE, 140249705693184, 140249710600191,
+STORE, 140249705684992, 140249710600191,
+STORE, 140249705668608, 140249710600191,
+STORE, 140249705660416, 140249710600191,
+STORE, 140249705627648, 140249710600191,
+STORE, 140249677893632, 140249710600191,
+STORE, 140249677877248, 140249710600191,
+STORE, 140249677869056, 140249710600191,
+STORE, 140249677852672, 140249710600191,
+STORE, 140249677844480, 140249710600191,
+STORE, 140249677828096, 140249710600191,
+STORE, 140249668689920, 140249677606911,
+STORE, 140249677819904, 140249710600191,
+STORE, 140249677787136, 140249710600191,
+STORE, 140249677778944, 140249710600191,
+STORE, 140249677762560, 140249710600191,
+STORE, 140249677754368, 140249710600191,
+STORE, 140249677737984, 140249710600191,
+STORE, 140249677729792, 140249710600191,
+STORE, 140249677713408, 140249710600191,
+STORE, 140249677705216, 140249710600191,
+STORE, 140249677672448, 140249710600191,
+STORE, 140249677664256, 140249710600191,
+STORE, 140249677647872, 140249710600191,
+STORE, 140249677639680, 140249710600191,
+STORE, 140249677623296, 140249710600191,
+STORE, 140249677615104, 140249710600191,
+STORE, 140249668673536, 140249677606911,
+STORE, 140249668673536, 140249710600191,
+STORE, 140249668640768, 140249710600191,
+STORE, 140249668632576, 140249710600191,
+STORE, 140249668616192, 140249710600191,
+STORE, 140249668608000, 140249710600191,
+STORE, 140249668591616, 140249710600191,
+STORE, 140249668583424, 140249710600191,
+STORE, 140249668567040, 140249710600191,
+STORE, 140249668558848, 140249710600191,
+STORE, 140249668526080, 140249710600191,
+STORE, 140249668517888, 140249710600191,
+STORE, 140249668501504, 140249710600191,
+STORE, 140249668493312, 140249710600191,
+STORE, 140249668476928, 140249710600191,
+STORE, 140249668468736, 140249710600191,
+STORE, 140249668452352, 140249710600191,
+STORE, 140249668444160, 140249710600191,
+STORE, 140249668411392, 140249710600191,
+STORE, 140249668403200, 140249710600191,
+STORE, 140249668386816, 140249710600191,
+STORE, 140249668378624, 140249710600191,
+STORE, 140249668362240, 140249710600191,
+STORE, 140249668354048, 140249710600191,
+STORE, 140249668337664, 140249710600191,
+STORE, 140249668329472, 140249710600191,
+STORE, 140249668296704, 140249710600191,
+STORE, 140249668288512, 140249710600191,
+STORE, 140249668272128, 140249710600191,
+STORE, 140249668263936, 140249710600191,
+STORE, 140249668247552, 140249710600191,
+STORE, 140249668239360, 140249710600191,
+STORE, 140249668222976, 140249710600191,
+STORE, 140249668214784, 140249710600191,
+STORE, 140249668182016, 140249710600191,
+STORE, 140249668173824, 140249710600191,
+STORE, 140249668157440, 140249710600191,
+STORE, 140249668149248, 140249710600191,
+STORE, 140249668132864, 140249710600191,
+STORE, 140249668124672, 140249710600191,
+STORE, 140249668108288, 140249710600191,
+STORE, 140249668100096, 140249710600191,
+STORE, 140249668067328, 140249710600191,
+STORE, 140249668059136, 140249710600191,
+STORE, 140249668042752, 140249710600191,
+STORE, 140249668034560, 140249710600191,
+STORE, 140249668018176, 140249710600191,
+STORE, 140249668009984, 140249710600191,
+STORE, 140249667993600, 140249710600191,
+STORE, 140249667985408, 140249710600191,
+STORE, 140249667952640, 140249710600191,
+STORE, 140249667944448, 140249710600191,
+STORE, 140249667928064, 140249710600191,
+STORE, 140249667919872, 140249710600191,
+STORE, 140249667903488, 140249710600191,
+STORE, 140249667895296, 140249710600191,
+STORE, 140249667878912, 140249710600191,
+STORE, 140249667870720, 140249710600191,
+STORE, 140249667837952, 140249710600191,
+STORE, 140249667829760, 140249710600191,
+STORE, 140249667813376, 140249710600191,
+STORE, 140249667805184, 140249710600191,
+STORE, 140249667788800, 140249710600191,
+STORE, 140249667780608, 140249710600191,
+STORE, 140249667764224, 140249710600191,
+STORE, 140249667756032, 140249710600191,
+STORE, 140249667723264, 140249710600191,
+STORE, 140249667715072, 140249710600191,
+STORE, 140249667698688, 140249710600191,
+STORE, 140249667690496, 140249710600191,
+STORE, 140249667674112, 140249710600191,
+STORE, 140249667665920, 140249710600191,
+STORE, 140249667649536, 140249710600191,
+STORE, 140249667641344, 140249710600191,
+STORE, 140249667608576, 140249710600191,
+STORE, 140249667600384, 140249710600191,
+STORE, 140249667584000, 140249710600191,
+STORE, 140249667575808, 140249710600191,
+STORE, 140249667559424, 140249710600191,
+STORE, 140249667551232, 140249710600191,
+STORE, 140249667534848, 140249710600191,
+STORE, 140249667526656, 140249710600191,
+STORE, 140249667493888, 140249710600191,
+STORE, 140249667485696, 140249710600191,
+STORE, 140249667469312, 140249710600191,
+STORE, 140249667461120, 140249710600191,
+STORE, 140249667444736, 140249710600191,
+STORE, 140249667436544, 140249710600191,
+STORE, 140249667420160, 140249710600191,
+STORE, 140249665323008, 140249710600191,
+STORE, 140249665314816, 140249710600191,
+STORE, 140249665282048, 140249710600191,
+STORE, 140249665273856, 140249710600191,
+STORE, 140249665257472, 140249710600191,
+STORE, 140249665249280, 140249710600191,
+STORE, 140249665232896, 140249710600191,
+STORE, 140249665224704, 140249710600191,
+STORE, 140249665208320, 140249710600191,
+STORE, 140249665200128, 140249710600191,
+STORE, 140249665167360, 140249710600191,
+STORE, 140249665159168, 140249710600191,
+STORE, 140249665142784, 140249710600191,
+STORE, 140249665134592, 140249710600191,
+STORE, 140249665118208, 140249710600191,
+STORE, 140249665110016, 140249710600191,
+STORE, 140249665093632, 140249710600191,
+STORE, 140249665085440, 140249710600191,
+STORE, 140249665052672, 140249710600191,
+STORE, 140249665044480, 140249710600191,
+STORE, 140249665028096, 140249710600191,
+STORE, 140249665019904, 140249710600191,
+STORE, 140249665003520, 140249710600191,
+STORE, 140249664995328, 140249710600191,
+STORE, 140249664978944, 140249710600191,
+STORE, 140249664970752, 140249710600191,
+STORE, 140249664937984, 140249710600191,
+STORE, 140249664929792, 140249710600191,
+STORE, 140249664913408, 140249710600191,
+STORE, 140249664905216, 140249710600191,
+STORE, 140249664888832, 140249710600191,
+STORE, 140249664880640, 140249710600191,
+STORE, 140249664864256, 140249710600191,
+STORE, 140249664856064, 140249710600191,
+STORE, 140249664823296, 140249710600191,
+STORE, 140249664815104, 140249710600191,
+STORE, 140249664798720, 140249710600191,
+STORE, 140249664790528, 140249710600191,
+STORE, 140249664774144, 140249710600191,
+STORE, 140249664765952, 140249710600191,
+STORE, 140249664749568, 140249710600191,
+STORE, 140249664741376, 140249710600191,
+STORE, 140249664708608, 140249710600191,
+STORE, 140249664700416, 140249710600191,
+STORE, 140249664684032, 140249710600191,
+STORE, 140249664675840, 140249710600191,
+STORE, 140249664659456, 140249710600191,
+STORE, 140249664651264, 140249710600191,
+STORE, 140249664634880, 140249710600191,
+STORE, 140249664626688, 140249710600191,
+STORE, 140249664593920, 140249710600191,
+STORE, 140249664585728, 140249710600191,
+STORE, 140249664569344, 140249710600191,
+STORE, 140249664561152, 140249710600191,
+STORE, 140249664544768, 140249710600191,
+STORE, 140249664536576, 140249710600191,
+STORE, 140249664520192, 140249710600191,
+STORE, 140249664512000, 140249710600191,
+STORE, 140249664479232, 140249710600191,
+STORE, 140249664471040, 140249710600191,
+STORE, 140249664454656, 140249710600191,
+STORE, 140249664446464, 140249710600191,
+STORE, 140249664430080, 140249710600191,
+STORE, 140249664421888, 140249710600191,
+STORE, 140249664405504, 140249710600191,
+STORE, 140249664397312, 140249710600191,
+STORE, 140249664364544, 140249710600191,
+STORE, 140249664356352, 140249710600191,
+STORE, 140249664339968, 140249710600191,
+STORE, 140249664331776, 140249710600191,
+STORE, 140249664315392, 140249710600191,
+STORE, 140249664307200, 140249710600191,
+STORE, 140249664290816, 140249710600191,
+STORE, 140249664282624, 140249710600191,
+STORE, 140249664249856, 140249710600191,
+STORE, 140249664241664, 140249710600191,
+STORE, 140249664225280, 140249710600191,
+STORE, 140249664217088, 140249710600191,
+STORE, 140249664200704, 140249710600191,
+STORE, 140249664192512, 140249710600191,
+STORE, 140249664176128, 140249710600191,
+STORE, 140249664167936, 140249710600191,
+STORE, 140249664135168, 140249710600191,
+STORE, 140249664126976, 140249710600191,
+STORE, 140249664110592, 140249710600191,
+STORE, 140249664102400, 140249710600191,
+STORE, 140249664086016, 140249710600191,
+STORE, 140249664077824, 140249710600191,
+STORE, 140249664061440, 140249710600191,
+STORE, 140249664053248, 140249710600191,
+STORE, 140249664020480, 140249710600191,
+STORE, 140249664012288, 140249710600191,
+STORE, 140249663995904, 140249710600191,
+STORE, 140249663987712, 140249710600191,
+STORE, 140249663971328, 140249710600191,
+STORE, 140249663963136, 140249710600191,
+STORE, 140249663946752, 140249710600191,
+STORE, 140249663938560, 140249710600191,
+STORE, 140249663905792, 140249710600191,
+STORE, 140249663897600, 140249710600191,
+STORE, 140249663881216, 140249710600191,
+STORE, 140249663873024, 140249710600191,
+STORE, 140249663856640, 140249710600191,
+STORE, 140249663848448, 140249710600191,
+STORE, 140249663832064, 140249710600191,
+STORE, 140249663823872, 140249710600191,
+STORE, 140249663791104, 140249710600191,
+STORE, 140249663782912, 140249710600191,
+STORE, 140249663766528, 140249710600191,
+STORE, 140249663758336, 140249710600191,
+STORE, 140249663741952, 140249710600191,
+STORE, 140249663733760, 140249710600191,
+STORE, 140249663717376, 140249710600191,
+STORE, 140249663709184, 140249710600191,
+STORE, 140249663676416, 140249710600191,
+STORE, 140249663668224, 140249710600191,
+STORE, 140249663651840, 140249710600191,
+STORE, 140249663643648, 140249710600191,
+STORE, 140249663627264, 140249710600191,
+STORE, 33853440, 38526975,
+STORE, 140249663619072, 140249710600191,
+STORE, 140249663602688, 140249710600191,
+STORE, 140249661505536, 140249710600191,
+STORE, 140249661497344, 140249710600191,
+STORE, 140249661464576, 140249710600191,
+STORE, 140249661456384, 140249710600191,
+STORE, 140249661440000, 140249710600191,
+STORE, 140249661431808, 140249710600191,
+STORE, 140249661415424, 140249710600191,
+STORE, 140249661407232, 140249710600191,
+STORE, 140249661390848, 140249710600191,
+STORE, 140249661382656, 140249710600191,
+STORE, 140249661349888, 140249710600191,
+STORE, 140249661341696, 140249710600191,
+STORE, 140249661325312, 140249710600191,
+STORE, 140249661317120, 140249710600191,
+STORE, 140249661300736, 140249710600191,
+STORE, 140249661292544, 140249710600191,
+STORE, 140249661276160, 140249710600191,
+STORE, 140249661267968, 140249710600191,
+STORE, 140249661235200, 140249710600191,
+STORE, 140249661227008, 140249710600191,
+STORE, 140249661210624, 140249710600191,
+STORE, 140249661202432, 140249710600191,
+STORE, 140249661186048, 140249710600191,
+STORE, 140249661177856, 140249710600191,
+STORE, 140249661161472, 140249710600191,
+STORE, 140249661153280, 140249710600191,
+STORE, 140249661120512, 140249710600191,
+STORE, 140249661112320, 140249710600191,
+STORE, 140249661095936, 140249710600191,
+STORE, 140249661087744, 140249710600191,
+STORE, 140249661071360, 140249710600191,
+STORE, 140249661063168, 140249710600191,
+STORE, 140249661046784, 140249710600191,
+STORE, 140249661038592, 140249710600191,
+STORE, 140249661005824, 140249710600191,
+STORE, 140249660997632, 140249710600191,
+STORE, 140249660981248, 140249710600191,
+STORE, 140249660973056, 140249710600191,
+STORE, 140249660956672, 140249710600191,
+STORE, 140249660948480, 140249710600191,
+STORE, 140249660932096, 140249710600191,
+STORE, 140249660923904, 140249710600191,
+STORE, 140249660891136, 140249710600191,
+STORE, 140249660882944, 140249710600191,
+STORE, 140249660866560, 140249710600191,
+STORE, 140249660858368, 140249710600191,
+STORE, 140249660841984, 140249710600191,
+STORE, 140249660833792, 140249710600191,
+STORE, 140249660817408, 140249710600191,
+STORE, 140249660809216, 140249710600191,
+STORE, 140249660776448, 140249710600191,
+STORE, 140249660768256, 140249710600191,
+STORE, 140249660751872, 140249710600191,
+STORE, 140249660743680, 140249710600191,
+STORE, 140249660727296, 140249710600191,
+STORE, 140249660719104, 140249710600191,
+STORE, 140249660702720, 140249710600191,
+STORE, 140249660694528, 140249710600191,
+STORE, 140249660661760, 140249710600191,
+STORE, 140249660653568, 140249710600191,
+STORE, 140249660637184, 140249710600191,
+STORE, 140249660628992, 140249710600191,
+STORE, 140249660612608, 140249710600191,
+STORE, 140249660604416, 140249710600191,
+STORE, 140249660588032, 140249710600191,
+STORE, 140249660579840, 140249710600191,
+STORE, 140249660547072, 140249710600191,
+STORE, 140249660538880, 140249710600191,
+STORE, 140249660522496, 140249710600191,
+STORE, 140249660514304, 140249710600191,
+STORE, 140249660497920, 140249710600191,
+STORE, 140249660489728, 140249710600191,
+STORE, 140249660473344, 140249710600191,
+STORE, 140249660465152, 140249710600191,
+STORE, 140249660432384, 140249710600191,
+STORE, 140249660424192, 140249710600191,
+STORE, 140249660407808, 140249710600191,
+STORE, 140249660399616, 140249710600191,
+STORE, 140249660383232, 140249710600191,
+STORE, 140249660375040, 140249710600191,
+STORE, 140249660358656, 140249710600191,
+STORE, 140249660350464, 140249710600191,
+STORE, 140249660317696, 140249710600191,
+STORE, 140249660309504, 140249710600191,
+STORE, 140249660293120, 140249710600191,
+STORE, 140249660284928, 140249710600191,
+STORE, 140249660268544, 140249710600191,
+STORE, 140249660260352, 140249710600191,
+STORE, 140249660243968, 140249710600191,
+STORE, 140249660235776, 140249710600191,
+STORE, 140249660203008, 140249710600191,
+STORE, 140249660194816, 140249710600191,
+STORE, 140249660178432, 140249710600191,
+STORE, 140249660170240, 140249710600191,
+STORE, 140249660153856, 140249710600191,
+STORE, 140249660145664, 140249710600191,
+STORE, 140249660129280, 140249710600191,
+STORE, 140249660121088, 140249710600191,
+STORE, 140249660088320, 140249710600191,
+STORE, 140249660080128, 140249710600191,
+STORE, 140249660063744, 140249710600191,
+STORE, 140249660055552, 140249710600191,
+STORE, 140249660039168, 140249710600191,
+STORE, 140249660030976, 140249710600191,
+STORE, 140249660014592, 140249710600191,
+STORE, 140249660006400, 140249710600191,
+STORE, 140249659973632, 140249710600191,
+STORE, 140249659965440, 140249710600191,
+STORE, 140249659949056, 140249710600191,
+STORE, 140249659940864, 140249710600191,
+STORE, 140249659924480, 140249710600191,
+STORE, 140249659916288, 140249710600191,
+STORE, 140249659899904, 140249710600191,
+STORE, 140249659891712, 140249710600191,
+STORE, 140249659858944, 140249710600191,
+STORE, 140249659850752, 140249710600191,
+STORE, 140249659834368, 140249710600191,
+STORE, 140249659826176, 140249710600191,
+STORE, 140249659809792, 140249710600191,
+STORE, 140249659801600, 140249710600191,
+STORE, 140249659785216, 140249710600191,
+STORE, 140249657688064, 140249710600191,
+STORE, 140249657679872, 140249710600191,
+STORE, 140249657647104, 140249710600191,
+STORE, 140249657638912, 140249710600191,
+STORE, 140249657622528, 140249710600191,
+STORE, 140249657614336, 140249710600191,
+STORE, 140249657597952, 140249710600191,
+STORE, 140249657589760, 140249710600191,
+STORE, 140249657573376, 140249710600191,
+STORE, 140249657565184, 140249710600191,
+STORE, 140249657532416, 140249710600191,
+STORE, 140249657524224, 140249710600191,
+STORE, 140249657507840, 140249710600191,
+STORE, 140249657499648, 140249710600191,
+STORE, 140249657483264, 140249710600191,
+STORE, 140249657475072, 140249710600191,
+STORE, 140249657458688, 140249710600191,
+STORE, 140249657450496, 140249710600191,
+STORE, 140249657417728, 140249710600191,
+STORE, 140249657409536, 140249710600191,
+STORE, 140249657393152, 140249710600191,
+STORE, 140249657384960, 140249710600191,
+STORE, 140249657368576, 140249710600191,
+STORE, 140249657360384, 140249710600191,
+STORE, 140249657344000, 140249710600191,
+STORE, 140249657335808, 140249710600191,
+STORE, 140249657303040, 140249710600191,
+STORE, 140249657294848, 140249710600191,
+STORE, 140249657278464, 140249710600191,
+STORE, 140249657270272, 140249710600191,
+STORE, 140249657253888, 140249710600191,
+STORE, 140249657245696, 140249710600191,
+STORE, 140249657229312, 140249710600191,
+STORE, 140249657221120, 140249710600191,
+STORE, 140249657188352, 140249710600191,
+STORE, 140249657180160, 140249710600191,
+STORE, 140249657163776, 140249710600191,
+STORE, 140249657155584, 140249710600191,
+STORE, 140249657139200, 140249710600191,
+STORE, 140249657131008, 140249710600191,
+STORE, 140249657114624, 140249710600191,
+STORE, 140249657106432, 140249710600191,
+STORE, 140249657073664, 140249710600191,
+STORE, 140249657065472, 140249710600191,
+STORE, 140249657049088, 140249710600191,
+STORE, 140249657040896, 140249710600191,
+STORE, 140249657024512, 140249710600191,
+STORE, 140249657016320, 140249710600191,
+STORE, 140249656999936, 140249710600191,
+STORE, 140249656991744, 140249710600191,
+STORE, 140249656958976, 140249710600191,
+STORE, 140249656950784, 140249710600191,
+STORE, 140249656934400, 140249710600191,
+STORE, 140249656926208, 140249710600191,
+STORE, 140249656909824, 140249710600191,
+STORE, 140249656901632, 140249710600191,
+STORE, 140249656885248, 140249710600191,
+STORE, 140249656877056, 140249710600191,
+STORE, 140249656844288, 140249710600191,
+STORE, 140249656836096, 140249710600191,
+STORE, 140249656819712, 140249710600191,
+STORE, 140249656811520, 140249710600191,
+STORE, 140249656795136, 140249710600191,
+STORE, 33853440, 38662143,
+STORE, 140249656786944, 140249710600191,
+STORE, 140249656770560, 140249710600191,
+STORE, 140249656762368, 140249710600191,
+STORE, 140249656729600, 140249710600191,
+STORE, 140249656721408, 140249710600191,
+STORE, 140249656705024, 140249710600191,
+STORE, 140249656696832, 140249710600191,
+STORE, 140249656680448, 140249710600191,
+STORE, 140249656672256, 140249710600191,
+STORE, 140249656655872, 140249710600191,
+STORE, 140249656647680, 140249710600191,
+STORE, 140249656614912, 140249710600191,
+STORE, 140249656606720, 140249710600191,
+STORE, 140249656590336, 140249710600191,
+STORE, 140249656582144, 140249710600191,
+STORE, 140249656565760, 140249710600191,
+STORE, 140249656557568, 140249710600191,
+STORE, 140249656541184, 140249710600191,
+STORE, 140249656532992, 140249710600191,
+STORE, 140249656500224, 140249710600191,
+STORE, 140249656492032, 140249710600191,
+STORE, 140249656475648, 140249710600191,
+STORE, 140249656467456, 140249710600191,
+STORE, 140249656451072, 140249710600191,
+STORE, 140249656442880, 140249710600191,
+STORE, 140249656426496, 140249710600191,
+STORE, 140249656418304, 140249710600191,
+STORE, 140249656385536, 140249710600191,
+STORE, 140249656377344, 140249710600191,
+STORE, 140249656360960, 140249710600191,
+STORE, 140249656352768, 140249710600191,
+STORE, 140249656336384, 140249710600191,
+STORE, 140249656328192, 140249710600191,
+STORE, 140249656311808, 140249710600191,
+STORE, 140249656303616, 140249710600191,
+STORE, 140249656270848, 140249710600191,
+STORE, 140249656262656, 140249710600191,
+STORE, 140249656246272, 140249710600191,
+STORE, 140249656238080, 140249710600191,
+STORE, 140249656221696, 140249710600191,
+STORE, 140249656213504, 140249710600191,
+STORE, 140249656197120, 140249710600191,
+STORE, 140249656188928, 140249710600191,
+STORE, 140249656156160, 140249710600191,
+STORE, 140249656147968, 140249710600191,
+STORE, 140249656131584, 140249710600191,
+STORE, 140249656123392, 140249710600191,
+STORE, 140249656107008, 140249710600191,
+STORE, 140249656098816, 140249710600191,
+STORE, 140249656082432, 140249710600191,
+STORE, 140249656074240, 140249710600191,
+STORE, 140249656041472, 140249710600191,
+STORE, 140249656033280, 140249710600191,
+STORE, 140249656016896, 140249710600191,
+STORE, 140249656008704, 140249710600191,
+STORE, 140249655992320, 140249710600191,
+STORE, 140249655984128, 140249710600191,
+STORE, 140249655967744, 140249710600191,
+STORE, 140249653870592, 140249710600191,
+STORE, 140249653862400, 140249710600191,
+STORE, 140249653829632, 140249710600191,
+STORE, 140249653821440, 140249710600191,
+STORE, 140249653805056, 140249710600191,
+STORE, 140249653796864, 140249710600191,
+STORE, 140249653780480, 140249710600191,
+STORE, 140249653772288, 140249710600191,
+STORE, 140249653755904, 140249710600191,
+STORE, 140249652703232, 140249710600191,
+SNULL, 140249682087935, 140249710600191,
+STORE, 140249652703232, 140249682087935,
+STORE, 140249682087936, 140249710600191,
+ };
+
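+/*
+ * set26 below is presumably a recorded trace of address-range operations,
+ * given as repeated (op, start, end) triples of STORE/SNULL/ERASE, captured
+ * from a live mm workload and replayed by the test harness (assumption).
+ */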
+ unsigned long set26[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140729464770560, 140737488351231,
+SNULL, 140729464774655, 140737488351231,
+STORE, 140729464770560, 140729464774655,
+STORE, 140729464639488, 140729464774655,
+STORE, 4194304, 5066751,
+STORE, 7159808, 7172095,
+STORE, 7172096, 7180287,
+STORE, 140729465114624, 140729465118719,
+STORE, 140729465102336, 140729465114623,
+STORE, 30867456, 30875647,
+STORE, 30867456, 31010815,
+STORE, 140109040988160, 140109042671615,
+STORE, 140109040959488, 140109040988159,
+STORE, 140109040943104, 140109040959487,
+ERASE, 140109040943104, 140109040959487,
+STORE, 140109040840704, 140109040959487,
+ERASE, 140109040840704, 140109040959487,
+STORE, 140109040951296, 140109040959487,
+ERASE, 140109040951296, 140109040959487,
+STORE, 140109040955392, 140109040959487,
+ERASE, 140109040955392, 140109040959487,
+ };
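+/* set27: another recorded (op, start, end) trace of the same form, assumed captured from a live workload. */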
+ unsigned long set27[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140726128070656, 140737488351231,
+SNULL, 140726128074751, 140737488351231,
+STORE, 140726128070656, 140726128074751,
+STORE, 140726127939584, 140726128074751,
+STORE, 94478497189888, 94478499303423,
+SNULL, 94478497202175, 94478499303423,
+STORE, 94478497189888, 94478497202175,
+STORE, 94478497202176, 94478499303423,
+ERASE, 94478497202176, 94478499303423,
+STORE, 94478499295232, 94478499303423,
+STORE, 140415605723136, 140415607975935,
+SNULL, 140415605866495, 140415607975935,
+STORE, 140415605723136, 140415605866495,
+STORE, 140415605866496, 140415607975935,
+ERASE, 140415605866496, 140415607975935,
+STORE, 140415607963648, 140415607971839,
+STORE, 140415607971840, 140415607975935,
+STORE, 140726130024448, 140726130028543,
+STORE, 140726130012160, 140726130024447,
+STORE, 140415607934976, 140415607963647,
+STORE, 140415607926784, 140415607934975,
+STORE, 140415603245056, 140415605723135,
+SNULL, 140415603245056, 140415603613695,
+STORE, 140415603613696, 140415605723135,
+STORE, 140415603245056, 140415603613695,
+SNULL, 140415605710847, 140415605723135,
+STORE, 140415603613696, 140415605710847,
+STORE, 140415605710848, 140415605723135,
+ERASE, 140415605710848, 140415605723135,
+STORE, 140415605710848, 140415605723135,
+STORE, 140415599370240, 140415603245055,
+SNULL, 140415599370240, 140415601111039,
+STORE, 140415601111040, 140415603245055,
+STORE, 140415599370240, 140415601111039,
+SNULL, 140415603208191, 140415603245055,
+STORE, 140415601111040, 140415603208191,
+STORE, 140415603208192, 140415603245055,
+ERASE, 140415603208192, 140415603245055,
+STORE, 140415603208192, 140415603245055,
+STORE, 140415595692032, 140415599370239,
+SNULL, 140415595692032, 140415597207551,
+STORE, 140415597207552, 140415599370239,
+STORE, 140415595692032, 140415597207551,
+SNULL, 140415599304703, 140415599370239,
+STORE, 140415597207552, 140415599304703,
+STORE, 140415599304704, 140415599370239,
+SNULL, 140415599304704, 140415599353855,
+STORE, 140415599353856, 140415599370239,
+STORE, 140415599304704, 140415599353855,
+ERASE, 140415599304704, 140415599353855,
+STORE, 140415599304704, 140415599353855,
+ERASE, 140415599353856, 140415599370239,
+STORE, 140415599353856, 140415599370239,
+STORE, 140415593500672, 140415595692031,
+SNULL, 140415593500672, 140415593590783,
+STORE, 140415593590784, 140415595692031,
+STORE, 140415593500672, 140415593590783,
+SNULL, 140415595683839, 140415595692031,
+STORE, 140415593590784, 140415595683839,
+STORE, 140415595683840, 140415595692031,
+ERASE, 140415595683840, 140415595692031,
+STORE, 140415595683840, 140415595692031,
+STORE, 140415589703680, 140415593500671,
+SNULL, 140415589703680, 140415591362559,
+STORE, 140415591362560, 140415593500671,
+STORE, 140415589703680, 140415591362559,
+SNULL, 140415593459711, 140415593500671,
+STORE, 140415591362560, 140415593459711,
+STORE, 140415593459712, 140415593500671,
+SNULL, 140415593459712, 140415593484287,
+STORE, 140415593484288, 140415593500671,
+STORE, 140415593459712, 140415593484287,
+ERASE, 140415593459712, 140415593484287,
+STORE, 140415593459712, 140415593484287,
+ERASE, 140415593484288, 140415593500671,
+STORE, 140415593484288, 140415593500671,
+STORE, 140415587590144, 140415589703679,
+SNULL, 140415587590144, 140415587602431,
+STORE, 140415587602432, 140415589703679,
+STORE, 140415587590144, 140415587602431,
+SNULL, 140415589695487, 140415589703679,
+STORE, 140415587602432, 140415589695487,
+STORE, 140415589695488, 140415589703679,
+ERASE, 140415589695488, 140415589703679,
+STORE, 140415589695488, 140415589703679,
+STORE, 140415607918592, 140415607934975,
+STORE, 140415585398784, 140415587590143,
+SNULL, 140415585398784, 140415585480703,
+STORE, 140415585480704, 140415587590143,
+STORE, 140415585398784, 140415585480703,
+SNULL, 140415587573759, 140415587590143,
+STORE, 140415585480704, 140415587573759,
+STORE, 140415587573760, 140415587590143,
+SNULL, 140415587573760, 140415587581951,
+STORE, 140415587581952, 140415587590143,
+STORE, 140415587573760, 140415587581951,
+ERASE, 140415587573760, 140415587581951,
+STORE, 140415587573760, 140415587581951,
+ERASE, 140415587581952, 140415587590143,
+STORE, 140415587581952, 140415587590143,
+STORE, 140415583182848, 140415585398783,
+SNULL, 140415583182848, 140415583281151,
+STORE, 140415583281152, 140415585398783,
+STORE, 140415583182848, 140415583281151,
+SNULL, 140415585374207, 140415585398783,
+STORE, 140415583281152, 140415585374207,
+STORE, 140415585374208, 140415585398783,
+SNULL, 140415585374208, 140415585382399,
+STORE, 140415585382400, 140415585398783,
+STORE, 140415585374208, 140415585382399,
+ERASE, 140415585374208, 140415585382399,
+STORE, 140415585374208, 140415585382399,
+ERASE, 140415585382400, 140415585398783,
+STORE, 140415585382400, 140415585398783,
+STORE, 140415580979200, 140415583182847,
+SNULL, 140415580979200, 140415581081599,
+STORE, 140415581081600, 140415583182847,
+STORE, 140415580979200, 140415581081599,
+SNULL, 140415583174655, 140415583182847,
+STORE, 140415581081600, 140415583174655,
+STORE, 140415583174656, 140415583182847,
+ERASE, 140415583174656, 140415583182847,
+STORE, 140415583174656, 140415583182847,
+STORE, 140415578816512, 140415580979199,
+SNULL, 140415578816512, 140415578877951,
+STORE, 140415578877952, 140415580979199,
+STORE, 140415578816512, 140415578877951,
+SNULL, 140415580971007, 140415580979199,
+STORE, 140415578877952, 140415580971007,
+STORE, 140415580971008, 140415580979199,
+ERASE, 140415580971008, 140415580979199,
+STORE, 140415580971008, 140415580979199,
+STORE, 140415576563712, 140415578816511,
+SNULL, 140415576563712, 140415576715263,
+STORE, 140415576715264, 140415578816511,
+STORE, 140415576563712, 140415576715263,
+SNULL, 140415578808319, 140415578816511,
+STORE, 140415576715264, 140415578808319,
+STORE, 140415578808320, 140415578816511,
+ERASE, 140415578808320, 140415578816511,
+STORE, 140415578808320, 140415578816511,
+STORE, 140415574392832, 140415576563711,
+SNULL, 140415574392832, 140415574462463,
+STORE, 140415574462464, 140415576563711,
+STORE, 140415574392832, 140415574462463,
+SNULL, 140415576555519, 140415576563711,
+STORE, 140415574462464, 140415576555519,
+STORE, 140415576555520, 140415576563711,
+ERASE, 140415576555520, 140415576563711,
+STORE, 140415576555520, 140415576563711,
+STORE, 140415607910400, 140415607934975,
+STORE, 140415571230720, 140415574392831,
+SNULL, 140415571230720, 140415572291583,
+STORE, 140415572291584, 140415574392831,
+STORE, 140415571230720, 140415572291583,
+SNULL, 140415574384639, 140415574392831,
+STORE, 140415572291584, 140415574384639,
+STORE, 140415574384640, 140415574392831,
+ERASE, 140415574384640, 140415574392831,
+STORE, 140415574384640, 140415574392831,
+STORE, 140415607902208, 140415607934975,
+SNULL, 140415593476095, 140415593484287,
+STORE, 140415593459712, 140415593476095,
+STORE, 140415593476096, 140415593484287,
+SNULL, 140415574388735, 140415574392831,
+STORE, 140415574384640, 140415574388735,
+STORE, 140415574388736, 140415574392831,
+SNULL, 140415576559615, 140415576563711,
+STORE, 140415576555520, 140415576559615,
+STORE, 140415576559616, 140415576563711,
+SNULL, 140415589699583, 140415589703679,
+STORE, 140415589695488, 140415589699583,
+STORE, 140415589699584, 140415589703679,
+SNULL, 140415585378303, 140415585382399,
+STORE, 140415585374208, 140415585378303,
+STORE, 140415585378304, 140415585382399,
+SNULL, 140415578812415, 140415578816511,
+STORE, 140415578808320, 140415578812415,
+STORE, 140415578812416, 140415578816511,
+SNULL, 140415580975103, 140415580979199,
+STORE, 140415580971008, 140415580975103,
+STORE, 140415580975104, 140415580979199,
+SNULL, 140415583178751, 140415583182847,
+STORE, 140415583174656, 140415583178751,
+STORE, 140415583178752, 140415583182847,
+SNULL, 140415587577855, 140415587581951,
+STORE, 140415587573760, 140415587577855,
+STORE, 140415587577856, 140415587581951,
+SNULL, 140415595687935, 140415595692031,
+STORE, 140415595683840, 140415595687935,
+STORE, 140415595687936, 140415595692031,
+STORE, 140415607894016, 140415607934975,
+SNULL, 140415599345663, 140415599353855,
+STORE, 140415599304704, 140415599345663,
+STORE, 140415599345664, 140415599353855,
+SNULL, 140415603240959, 140415603245055,
+STORE, 140415603208192, 140415603240959,
+STORE, 140415603240960, 140415603245055,
+SNULL, 140415605719039, 140415605723135,
+STORE, 140415605710848, 140415605719039,
+STORE, 140415605719040, 140415605723135,
+SNULL, 94478499299327, 94478499303423,
+STORE, 94478499295232, 94478499299327,
+STORE, 94478499299328, 94478499303423,
+SNULL, 140415607967743, 140415607971839,
+STORE, 140415607963648, 140415607967743,
+STORE, 140415607967744, 140415607971839,
+ERASE, 140415607934976, 140415607963647,
+STORE, 94478511173632, 94478511378431,
+STORE, 140415606210560, 140415607894015,
+STORE, 140415607934976, 140415607963647,
+STORE, 94478511173632, 94478511513599,
+STORE, 94478511173632, 94478511648767,
+SNULL, 94478511615999, 94478511648767,
+STORE, 94478511173632, 94478511615999,
+STORE, 94478511616000, 94478511648767,
+ERASE, 94478511616000, 94478511648767,
+STORE, 94478511173632, 94478511751167,
+SNULL, 94478511747071, 94478511751167,
+STORE, 94478511173632, 94478511747071,
+STORE, 94478511747072, 94478511751167,
+ERASE, 94478511747072, 94478511751167,
+STORE, 94478511173632, 94478511882239,
+SNULL, 94478511878143, 94478511882239,
+STORE, 94478511173632, 94478511878143,
+STORE, 94478511878144, 94478511882239,
+ERASE, 94478511878144, 94478511882239,
+STORE, 94478511173632, 94478512013311,
+SNULL, 94478512009215, 94478512013311,
+STORE, 94478511173632, 94478512009215,
+STORE, 94478512009216, 94478512013311,
+ERASE, 94478512009216, 94478512013311,
+STORE, 94478511173632, 94478512144383,
+STORE, 94478511173632, 94478512279551,
+STORE, 140415606181888, 140415606210559,
+STORE, 140415569100800, 140415571230719,
+SNULL, 140415569100800, 140415569129471,
+STORE, 140415569129472, 140415571230719,
+STORE, 140415569100800, 140415569129471,
+SNULL, 140415571222527, 140415571230719,
+STORE, 140415569129472, 140415571222527,
+STORE, 140415571222528, 140415571230719,
+ERASE, 140415571222528, 140415571230719,
+STORE, 140415571222528, 140415571230719,
+STORE, 140415566905344, 140415569100799,
+SNULL, 140415566905344, 140415566987263,
+STORE, 140415566987264, 140415569100799,
+STORE, 140415566905344, 140415566987263,
+SNULL, 140415569084415, 140415569100799,
+STORE, 140415566987264, 140415569084415,
+STORE, 140415569084416, 140415569100799,
+SNULL, 140415569084416, 140415569092607,
+STORE, 140415569092608, 140415569100799,
+STORE, 140415569084416, 140415569092607,
+ERASE, 140415569084416, 140415569092607,
+STORE, 140415569084416, 140415569092607,
+ERASE, 140415569092608, 140415569100799,
+STORE, 140415569092608, 140415569100799,
+SNULL, 140415569088511, 140415569092607,
+STORE, 140415569084416, 140415569088511,
+STORE, 140415569088512, 140415569092607,
+SNULL, 140415571226623, 140415571230719,
+STORE, 140415571222528, 140415571226623,
+STORE, 140415571226624, 140415571230719,
+ERASE, 140415606181888, 140415606210559,
+STORE, 140415606181888, 140415606210559,
+STORE, 140415564759040, 140415566905343,
+SNULL, 140415564759040, 140415564804095,
+STORE, 140415564804096, 140415566905343,
+STORE, 140415564759040, 140415564804095,
+SNULL, 140415566897151, 140415566905343,
+STORE, 140415564804096, 140415566897151,
+STORE, 140415566897152, 140415566905343,
+ERASE, 140415566897152, 140415566905343,
+STORE, 140415566897152, 140415566905343,
+STORE, 140415562588160, 140415564759039,
+SNULL, 140415562588160, 140415562629119,
+STORE, 140415562629120, 140415564759039,
+STORE, 140415562588160, 140415562629119,
+SNULL, 140415564726271, 140415564759039,
+STORE, 140415562629120, 140415564726271,
+STORE, 140415564726272, 140415564759039,
+SNULL, 140415564726272, 140415564734463,
+STORE, 140415564734464, 140415564759039,
+STORE, 140415564726272, 140415564734463,
+ERASE, 140415564726272, 140415564734463,
+STORE, 140415564726272, 140415564734463,
+ERASE, 140415564734464, 140415564759039,
+STORE, 140415564734464, 140415564759039,
+SNULL, 140415564730367, 140415564734463,
+STORE, 140415564726272, 140415564730367,
+STORE, 140415564730368, 140415564734463,
+SNULL, 140415566901247, 140415566905343,
+STORE, 140415566897152, 140415566901247,
+STORE, 140415566901248, 140415566905343,
+ERASE, 140415606181888, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415605944320, 140415606210559,
+ERASE, 140415605944320, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 94478511173632, 94478512414719,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 140415606206464, 140415606210559,
+ERASE, 140415606206464, 140415606210559,
+STORE, 94478511173632, 94478512652287,
+STORE, 94478511173632, 94478512787455,
+STORE, 94478511173632, 94478512922623,
+STORE, 94478511173632, 94478513057791,
+STORE, 140415537422336, 140415562588159,
+STORE, 94478511173632, 94478513192959,
+STORE, 94478511173632, 94478513356799,
+STORE, 94478511173632, 94478513491967,
+STORE, 94478511173632, 94478513627135,
+STORE, 94478511173632, 94478513790975,
+STORE, 94478511173632, 94478513926143,
+STORE, 94478511173632, 94478514061311,
+STORE, 94478511173632, 94478514196479,
+STORE, 94478511173632, 94478514331647,
+STORE, 94478511173632, 94478514606079,
+STORE, 94478511173632, 94478514741247,
+STORE, 94478511173632, 94478514876415,
+STORE, 94478511173632, 94478515011583,
+STORE, 94478511173632, 94478515146751,
+STORE, 94478511173632, 94478515281919,
+STORE, 94478511173632, 94478515474431,
+STORE, 94478511173632, 94478515609599,
+STORE, 94478511173632, 94478515744767,
+STORE, 140415536922624, 140415562588159,
+STORE, 94478511173632, 94478515879935,
+STORE, 94478511173632, 94478516015103,
+STORE, 94478511173632, 94478516150271,
+STORE, 94478511173632, 94478516285439,
+STORE, 94478511173632, 94478516420607,
+STORE, 94478511173632, 94478516555775,
+STORE, 94478511173632, 94478516690943,
+STORE, 94478511173632, 94478516826111,
+STORE, 94478511173632, 94478516961279,
+STORE, 94478511173632, 94478517231615,
+STORE, 94478511173632, 94478517366783,
+STORE, 94478511173632, 94478517501951,
+STORE, 94478511173632, 94478517637119,
+STORE, 94478511173632, 94478517772287,
+STORE, 94478511173632, 94478517907455,
+STORE, 94478511173632, 94478518042623,
+STORE, 94478511173632, 94478518177791,
+STORE, 94478511173632, 94478518312959,
+STORE, 94478511173632, 94478518448127,
+STORE, 140415535910912, 140415562588159,
+SNULL, 140415536922623, 140415562588159,
+STORE, 140415535910912, 140415536922623,
+STORE, 140415536922624, 140415562588159,
+SNULL, 140415536922624, 140415537422335,
+STORE, 140415537422336, 140415562588159,
+STORE, 140415536922624, 140415537422335,
+ERASE, 140415536922624, 140415537422335,
+STORE, 94478511173632, 94478518583295,
+STORE, 94478511173632, 94478518718463,
+STORE, 94478511173632, 94478518853631,
+STORE, 94478511173632, 94478518988799,
+STORE, 94478511173632, 94478519123967,
+STORE, 94478511173632, 94478519259135,
+STORE, 140415509696512, 140415535910911,
+ERASE, 140415537422336, 140415562588159,
+STORE, 140415482433536, 140415509696511,
+ };
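+/* set28: a further recorded trace of the same (op, start, end) form. */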
+ unsigned long set28[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140722475622400, 140737488351231,
+SNULL, 140722475626495, 140737488351231,
+STORE, 140722475622400, 140722475626495,
+STORE, 140722475491328, 140722475626495,
+STORE, 93865834291200, 93865836548095,
+SNULL, 93865834422271, 93865836548095,
+STORE, 93865834291200, 93865834422271,
+STORE, 93865834422272, 93865836548095,
+ERASE, 93865834422272, 93865836548095,
+STORE, 93865836519424, 93865836527615,
+STORE, 93865836527616, 93865836548095,
+STORE, 139918411104256, 139918413357055,
+SNULL, 139918411247615, 139918413357055,
+STORE, 139918411104256, 139918411247615,
+STORE, 139918411247616, 139918413357055,
+ERASE, 139918411247616, 139918413357055,
+STORE, 139918413344768, 139918413352959,
+STORE, 139918413352960, 139918413357055,
+STORE, 140722476642304, 140722476646399,
+STORE, 140722476630016, 140722476642303,
+STORE, 139918413316096, 139918413344767,
+STORE, 139918413307904, 139918413316095,
+STORE, 139918408888320, 139918411104255,
+SNULL, 139918408888320, 139918408986623,
+STORE, 139918408986624, 139918411104255,
+STORE, 139918408888320, 139918408986623,
+SNULL, 139918411079679, 139918411104255,
+STORE, 139918408986624, 139918411079679,
+STORE, 139918411079680, 139918411104255,
+SNULL, 139918411079680, 139918411087871,
+STORE, 139918411087872, 139918411104255,
+STORE, 139918411079680, 139918411087871,
+ERASE, 139918411079680, 139918411087871,
+STORE, 139918411079680, 139918411087871,
+ERASE, 139918411087872, 139918411104255,
+STORE, 139918411087872, 139918411104255,
+STORE, 139918405091328, 139918408888319,
+SNULL, 139918405091328, 139918406750207,
+STORE, 139918406750208, 139918408888319,
+STORE, 139918405091328, 139918406750207,
+SNULL, 139918408847359, 139918408888319,
+STORE, 139918406750208, 139918408847359,
+STORE, 139918408847360, 139918408888319,
+SNULL, 139918408847360, 139918408871935,
+STORE, 139918408871936, 139918408888319,
+STORE, 139918408847360, 139918408871935,
+ERASE, 139918408847360, 139918408871935,
+STORE, 139918408847360, 139918408871935,
+ERASE, 139918408871936, 139918408888319,
+STORE, 139918408871936, 139918408888319,
+STORE, 139918413299712, 139918413316095,
+SNULL, 139918408863743, 139918408871935,
+STORE, 139918408847360, 139918408863743,
+STORE, 139918408863744, 139918408871935,
+SNULL, 139918411083775, 139918411087871,
+STORE, 139918411079680, 139918411083775,
+STORE, 139918411083776, 139918411087871,
+SNULL, 93865836523519, 93865836527615,
+STORE, 93865836519424, 93865836523519,
+STORE, 93865836523520, 93865836527615,
+SNULL, 139918413348863, 139918413352959,
+STORE, 139918413344768, 139918413348863,
+STORE, 139918413348864, 139918413352959,
+ERASE, 139918413316096, 139918413344767,
+STORE, 93865848528896, 93865848664063,
+ };
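+/* set29: a longer recorded trace; the interleaved SNULL and ERASE entries presumably exercise range splits and removals (assumption). */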
+ unsigned long set29[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140734467944448, 140737488351231,
+SNULL, 140734467948543, 140737488351231,
+STORE, 140734467944448, 140734467948543,
+STORE, 140734467813376, 140734467948543,
+STORE, 94880407924736, 94880410177535,
+SNULL, 94880408055807, 94880410177535,
+STORE, 94880407924736, 94880408055807,
+STORE, 94880408055808, 94880410177535,
+ERASE, 94880408055808, 94880410177535,
+STORE, 94880410148864, 94880410157055,
+STORE, 94880410157056, 94880410177535,
+STORE, 140143367815168, 140143370067967,
+SNULL, 140143367958527, 140143370067967,
+STORE, 140143367815168, 140143367958527,
+STORE, 140143367958528, 140143370067967,
+ERASE, 140143367958528, 140143370067967,
+STORE, 140143370055680, 140143370063871,
+STORE, 140143370063872, 140143370067967,
+STORE, 140734468329472, 140734468333567,
+STORE, 140734468317184, 140734468329471,
+STORE, 140143370027008, 140143370055679,
+STORE, 140143370018816, 140143370027007,
+STORE, 140143365599232, 140143367815167,
+SNULL, 140143365599232, 140143365697535,
+STORE, 140143365697536, 140143367815167,
+STORE, 140143365599232, 140143365697535,
+SNULL, 140143367790591, 140143367815167,
+STORE, 140143365697536, 140143367790591,
+STORE, 140143367790592, 140143367815167,
+SNULL, 140143367790592, 140143367798783,
+STORE, 140143367798784, 140143367815167,
+STORE, 140143367790592, 140143367798783,
+ERASE, 140143367790592, 140143367798783,
+STORE, 140143367790592, 140143367798783,
+ERASE, 140143367798784, 140143367815167,
+STORE, 140143367798784, 140143367815167,
+STORE, 140143361802240, 140143365599231,
+SNULL, 140143361802240, 140143363461119,
+STORE, 140143363461120, 140143365599231,
+STORE, 140143361802240, 140143363461119,
+SNULL, 140143365558271, 140143365599231,
+STORE, 140143363461120, 140143365558271,
+STORE, 140143365558272, 140143365599231,
+SNULL, 140143365558272, 140143365582847,
+STORE, 140143365582848, 140143365599231,
+STORE, 140143365558272, 140143365582847,
+ERASE, 140143365558272, 140143365582847,
+STORE, 140143365558272, 140143365582847,
+ERASE, 140143365582848, 140143365599231,
+STORE, 140143365582848, 140143365599231,
+STORE, 140143370010624, 140143370027007,
+SNULL, 140143365574655, 140143365582847,
+STORE, 140143365558272, 140143365574655,
+STORE, 140143365574656, 140143365582847,
+SNULL, 140143367794687, 140143367798783,
+STORE, 140143367790592, 140143367794687,
+STORE, 140143367794688, 140143367798783,
+SNULL, 94880410152959, 94880410157055,
+STORE, 94880410148864, 94880410152959,
+STORE, 94880410152960, 94880410157055,
+SNULL, 140143370059775, 140143370063871,
+STORE, 140143370055680, 140143370059775,
+STORE, 140143370059776, 140143370063871,
+ERASE, 140143370027008, 140143370055679,
+STORE, 94880442400768, 94880442535935,
+STORE, 140143353409536, 140143361802239,
+SNULL, 140143353413631, 140143361802239,
+STORE, 140143353409536, 140143353413631,
+STORE, 140143353413632, 140143361802239,
+STORE, 140143345016832, 140143353409535,
+STORE, 140143210799104, 140143345016831,
+SNULL, 140143210799104, 140143239364607,
+STORE, 140143239364608, 140143345016831,
+STORE, 140143210799104, 140143239364607,
+ERASE, 140143210799104, 140143239364607,
+SNULL, 140143306473471, 140143345016831,
+STORE, 140143239364608, 140143306473471,
+STORE, 140143306473472, 140143345016831,
+ERASE, 140143306473472, 140143345016831,
+SNULL, 140143239499775, 140143306473471,
+STORE, 140143239364608, 140143239499775,
+STORE, 140143239499776, 140143306473471,
+SNULL, 140143345020927, 140143353409535,
+STORE, 140143345016832, 140143345020927,
+STORE, 140143345020928, 140143353409535,
+STORE, 140143336624128, 140143345016831,
+SNULL, 140143336628223, 140143345016831,
+STORE, 140143336624128, 140143336628223,
+STORE, 140143336628224, 140143345016831,
+STORE, 140143328231424, 140143336624127,
+SNULL, 140143328235519, 140143336624127,
+STORE, 140143328231424, 140143328235519,
+STORE, 140143328235520, 140143336624127,
+STORE, 140143319838720, 140143328231423,
+SNULL, 140143319842815, 140143328231423,
+STORE, 140143319838720, 140143319842815,
+STORE, 140143319842816, 140143328231423,
+STORE, 140143311446016, 140143319838719,
+STORE, 140143105146880, 140143239364607,
+STORE, 140143096754176, 140143105146879,
+STORE, 140143029645312, 140143096754175,
+ERASE, 140143029645312, 140143096754175,
+STORE, 140142962536448, 140143096754175,
+SNULL, 140142962536448, 140142970929151,
+STORE, 140142970929152, 140143096754175,
+STORE, 140142962536448, 140142970929151,
+ERASE, 140142962536448, 140142970929151,
+STORE, 140142962536448, 140142970929151,
+STORE, 140142828318720, 140142962536447,
+STORE, 140142819926016, 140142828318719,
+SNULL, 140142828318720, 140142836711423,
+STORE, 140142836711424, 140142962536447,
+STORE, 140142828318720, 140142836711423,
+ERASE, 140142828318720, 140142836711423,
+SNULL, 140143172255743, 140143239364607,
+STORE, 140143105146880, 140143172255743,
+STORE, 140143172255744, 140143239364607,
+ERASE, 140143172255744, 140143239364607,
+SNULL, 140143105282047, 140143172255743,
+STORE, 140143105146880, 140143105282047,
+STORE, 140143105282048, 140143172255743,
+SNULL, 140143038038015, 140143096754175,
+STORE, 140142970929152, 140143038038015,
+STORE, 140143038038016, 140143096754175,
+ERASE, 140143038038016, 140143096754175,
+SNULL, 140142971064319, 140143038038015,
+STORE, 140142970929152, 140142971064319,
+STORE, 140142971064320, 140143038038015,
+SNULL, 140142903820287, 140142962536447,
+STORE, 140142836711424, 140142903820287,
+STORE, 140142903820288, 140142962536447,
+ERASE, 140142903820288, 140142962536447,
+SNULL, 140142836846591, 140142903820287,
+STORE, 140142836711424, 140142836846591,
+STORE, 140142836846592, 140142903820287,
+STORE, 140142685708288, 140142819926015,
+SNULL, 140143311450111, 140143319838719,
+STORE, 140143311446016, 140143311450111,
+STORE, 140143311450112, 140143319838719,
+SNULL, 140142962540543, 140142970929151,
+STORE, 140142962536448, 140142962540543,
+STORE, 140142962540544, 140142970929151,
+SNULL, 140142685708288, 140142702493695,
+STORE, 140142702493696, 140142819926015,
+STORE, 140142685708288, 140142702493695,
+ERASE, 140142685708288, 140142702493695,
+SNULL, 140142769602559, 140142819926015,
+STORE, 140142702493696, 140142769602559,
+STORE, 140142769602560, 140142819926015,
+ERASE, 140142769602560, 140142819926015,
+SNULL, 140142702628863, 140142769602559,
+STORE, 140142702493696, 140142702628863,
+STORE, 140142702628864, 140142769602559,
+STORE, 140143230971904, 140143239364607,
+SNULL, 140143230975999, 140143239364607,
+STORE, 140143230971904, 140143230975999,
+STORE, 140143230976000, 140143239364607,
+SNULL, 140143096758271, 140143105146879,
+STORE, 140143096754176, 140143096758271,
+STORE, 140143096758272, 140143105146879,
+STORE, 140143222579200, 140143230971903,
+SNULL, 140143222583295, 140143230971903,
+STORE, 140143222579200, 140143222583295,
+STORE, 140143222583296, 140143230971903,
+STORE, 140143214186496, 140143222579199,
+SNULL, 140142819930111, 140142828318719,
+STORE, 140142819926016, 140142819930111,
+STORE, 140142819930112, 140142828318719,
+STORE, 140143205793792, 140143222579199,
+SNULL, 140143205793792, 140143214186495,
+STORE, 140143214186496, 140143222579199,
+STORE, 140143205793792, 140143214186495,
+SNULL, 140143214190591, 140143222579199,
+STORE, 140143214186496, 140143214190591,
+STORE, 140143214190592, 140143222579199,
+SNULL, 140143205797887, 140143214186495,
+STORE, 140143205793792, 140143205797887,
+STORE, 140143205797888, 140143214186495,
+STORE, 140143197401088, 140143205793791,
+SNULL, 140143197405183, 140143205793791,
+STORE, 140143197401088, 140143197405183,
+STORE, 140143197405184, 140143205793791,
+STORE, 140143189008384, 140143197401087,
+STORE, 140143180615680, 140143197401087,
+STORE, 140143088361472, 140143096754175,
+SNULL, 140143180619775, 140143197401087,
+STORE, 140143180615680, 140143180619775,
+STORE, 140143180619776, 140143197401087,
+SNULL, 140143180619776, 140143189008383,
+STORE, 140143189008384, 140143197401087,
+STORE, 140143180619776, 140143189008383,
+SNULL, 140143189012479, 140143197401087,
+STORE, 140143189008384, 140143189012479,
+STORE, 140143189012480, 140143197401087,
+SNULL, 140143088365567, 140143096754175,
+STORE, 140143088361472, 140143088365567,
+STORE, 140143088365568, 140143096754175,
+STORE, 140143079968768, 140143088361471,
+SNULL, 140143079972863, 140143088361471,
+STORE, 140143079968768, 140143079972863,
+STORE, 140143079972864, 140143088361471,
+STORE, 140143071576064, 140143079968767,
+SNULL, 140143071580159, 140143079968767,
+STORE, 140143071576064, 140143071580159,
+STORE, 140143071580160, 140143079968767,
+STORE, 140143063183360, 140143071576063,
+STORE, 140143054790656, 140143071576063,
+SNULL, 140143054794751, 140143071576063,
+STORE, 140143054790656, 140143054794751,
+STORE, 140143054794752, 140143071576063,
+SNULL, 140143054794752, 140143063183359,
+STORE, 140143063183360, 140143071576063,
+STORE, 140143054794752, 140143063183359,
+SNULL, 140143063187455, 140143071576063,
+STORE, 140143063183360, 140143063187455,
+STORE, 140143063187456, 140143071576063,
+STORE, 140143046397952, 140143054790655,
+STORE, 140142954143744, 140142962536447,
+STORE, 140142945751040, 140142962536447,
+STORE, 140142937358336, 140142962536447,
+STORE, 140142928965632, 140142962536447,
+STORE, 140142568275968, 140142702493695,
+SNULL, 140142635384831, 140142702493695,
+STORE, 140142568275968, 140142635384831,
+STORE, 140142635384832, 140142702493695,
+ERASE, 140142635384832, 140142702493695,
+STORE, 140142920572928, 140142962536447,
+STORE, 140142912180224, 140142962536447,
+STORE, 140142568275968, 140142702493695,
+SNULL, 140142568275968, 140142635384831,
+STORE, 140142635384832, 140142702493695,
+STORE, 140142568275968, 140142635384831,
+SNULL, 140142635519999, 140142702493695,
+STORE, 140142635384832, 140142635519999,
+STORE, 140142635520000, 140142702493695,
+STORE, 140142819930112, 140142836711423,
+STORE, 140142811533312, 140142819926015,
+STORE, 140142434058240, 140142635384831,
+SNULL, 140142501167103, 140142635384831,
+STORE, 140142434058240, 140142501167103,
+STORE, 140142501167104, 140142635384831,
+SNULL, 140142501167104, 140142568275967,
+STORE, 140142568275968, 140142635384831,
+STORE, 140142501167104, 140142568275967,
+ERASE, 140142501167104, 140142568275967,
+STORE, 140142299840512, 140142501167103,
+STORE, 140142803140608, 140142819926015,
+SNULL, 140142366949375, 140142501167103,
+STORE, 140142299840512, 140142366949375,
+STORE, 140142366949376, 140142501167103,
+SNULL, 140142366949376, 140142434058239,
+STORE, 140142434058240, 140142501167103,
+STORE, 140142366949376, 140142434058239,
+ERASE, 140142366949376, 140142434058239,
+STORE, 140142794747904, 140142819926015,
+STORE, 140142786355200, 140142819926015,
+STORE, 140142299840512, 140142501167103,
+STORE, 140142777962496, 140142819926015,
+STORE, 140142559883264, 140142568275967,
+STORE, 140142232731648, 140142501167103,
+STORE, 140142551490560, 140142568275967,
+SNULL, 140142777962496, 140142803140607,
+STORE, 140142803140608, 140142819926015,
+STORE, 140142777962496, 140142803140607,
+SNULL, 140142803144703, 140142819926015,
+STORE, 140142803140608, 140142803144703,
+STORE, 140142803144704, 140142819926015,
+STORE, 140142543097856, 140142568275967,
+STORE, 140142098513920, 140142501167103,
+SNULL, 140142165622783, 140142501167103,
+STORE, 140142098513920, 140142165622783,
+STORE, 140142165622784, 140142501167103,
+SNULL, 140142165622784, 140142232731647,
+STORE, 140142232731648, 140142501167103,
+STORE, 140142165622784, 140142232731647,
+ERASE, 140142165622784, 140142232731647,
+SNULL, 140142568411135, 140142635384831,
+STORE, 140142568275968, 140142568411135,
+STORE, 140142568411136, 140142635384831,
+STORE, 140141964296192, 140142165622783,
+SNULL, 140142912180224, 140142928965631,
+STORE, 140142928965632, 140142962536447,
+STORE, 140142912180224, 140142928965631,
+SNULL, 140142928969727, 140142962536447,
+STORE, 140142928965632, 140142928969727,
+STORE, 140142928969728, 140142962536447,
+STORE, 140141830078464, 140142165622783,
+SNULL, 140142912184319, 140142928965631,
+STORE, 140142912180224, 140142912184319,
+STORE, 140142912184320, 140142928965631,
+SNULL, 140142232731648, 140142434058239,
+STORE, 140142434058240, 140142501167103,
+STORE, 140142232731648, 140142434058239,
+SNULL, 140142434193407, 140142501167103,
+STORE, 140142434058240, 140142434193407,
+STORE, 140142434193408, 140142501167103,
+SNULL, 140142232731648, 140142299840511,
+STORE, 140142299840512, 140142434058239,
+STORE, 140142232731648, 140142299840511,
+SNULL, 140142299975679, 140142434058239,
+STORE, 140142299840512, 140142299975679,
+STORE, 140142299975680, 140142434058239,
+SNULL, 140142928969728, 140142954143743,
+STORE, 140142954143744, 140142962536447,
+STORE, 140142928969728, 140142954143743,
+SNULL, 140142954147839, 140142962536447,
+STORE, 140142954143744, 140142954147839,
+STORE, 140142954147840, 140142962536447,
+STORE, 140141830078464, 140142299840511,
+SNULL, 140142543097856, 140142559883263,
+STORE, 140142559883264, 140142568275967,
+STORE, 140142543097856, 140142559883263,
+SNULL, 140142559887359, 140142568275967,
+STORE, 140142559883264, 140142559887359,
+STORE, 140142559887360, 140142568275967,
+STORE, 140142534705152, 140142559883263,
+SNULL, 140142928969728, 140142945751039,
+STORE, 140142945751040, 140142954143743,
+STORE, 140142928969728, 140142945751039,
+SNULL, 140142945755135, 140142954143743,
+STORE, 140142945751040, 140142945755135,
+STORE, 140142945755136, 140142954143743,
+SNULL, 140142299975680, 140142366949375,
+STORE, 140142366949376, 140142434058239,
+STORE, 140142299975680, 140142366949375,
+SNULL, 140142367084543, 140142434058239,
+STORE, 140142366949376, 140142367084543,
+STORE, 140142367084544, 140142434058239,
+SNULL, 140142928969728, 140142937358335,
+STORE, 140142937358336, 140142945751039,
+STORE, 140142928969728, 140142937358335,
+SNULL, 140142937362431, 140142945751039,
+STORE, 140142937358336, 140142937362431,
+STORE, 140142937362432, 140142945751039,
+SNULL, 140141830078464, 140142232731647,
+STORE, 140142232731648, 140142299840511,
+STORE, 140141830078464, 140142232731647,
+SNULL, 140142232866815, 140142299840511,
+STORE, 140142232731648, 140142232866815,
+STORE, 140142232866816, 140142299840511,
+SNULL, 140142534705152, 140142543097855,
+STORE, 140142543097856, 140142559883263,
+STORE, 140142534705152, 140142543097855,
+SNULL, 140142543101951, 140142559883263,
+STORE, 140142543097856, 140142543101951,
+STORE, 140142543101952, 140142559883263,
+STORE, 140142526312448, 140142543097855,
+STORE, 140142517919744, 140142543097855,
+SNULL, 140141830078464, 140142098513919,
+STORE, 140142098513920, 140142232731647,
+STORE, 140141830078464, 140142098513919,
+SNULL, 140142098649087, 140142232731647,
+STORE, 140142098513920, 140142098649087,
+STORE, 140142098649088, 140142232731647,
+SNULL, 140142031405055, 140142098513919,
+STORE, 140141830078464, 140142031405055,
+STORE, 140142031405056, 140142098513919,
+ERASE, 140142031405056, 140142098513919,
+SNULL, 140141830078464, 140141964296191,
+STORE, 140141964296192, 140142031405055,
+STORE, 140141830078464, 140141964296191,
+SNULL, 140141964431359, 140142031405055,
+STORE, 140141964296192, 140141964431359,
+STORE, 140141964431360, 140142031405055,
+STORE, 140142509527040, 140142543097855,
+SNULL, 140141897187327, 140141964296191,
+STORE, 140141830078464, 140141897187327,
+STORE, 140141897187328, 140141964296191,
+ERASE, 140141897187328, 140141964296191,
+SNULL, 140141830213631, 140141897187327,
+STORE, 140141830078464, 140141830213631,
+STORE, 140141830213632, 140141897187327,
+SNULL, 140142803144704, 140142811533311,
+STORE, 140142811533312, 140142819926015,
+STORE, 140142803144704, 140142811533311,
+SNULL, 140142811537407, 140142819926015,
+STORE, 140142811533312, 140142811537407,
+STORE, 140142811537408, 140142819926015,
+SNULL, 140142098649088, 140142165622783,
+STORE, 140142165622784, 140142232731647,
+STORE, 140142098649088, 140142165622783,
+SNULL, 140142165757951, 140142232731647,
+STORE, 140142165622784, 140142165757951,
+STORE, 140142165757952, 140142232731647,
+STORE, 140142090121216, 140142098513919,
+SNULL, 140142777962496, 140142786355199,
+STORE, 140142786355200, 140142803140607,
+STORE, 140142777962496, 140142786355199,
+SNULL, 140142786359295, 140142803140607,
+STORE, 140142786355200, 140142786359295,
+STORE, 140142786359296, 140142803140607,
+SNULL, 140142509527040, 140142534705151,
+STORE, 140142534705152, 140142543097855,
+STORE, 140142509527040, 140142534705151,
+SNULL, 140142534709247, 140142543097855,
+STORE, 140142534705152, 140142534709247,
+STORE, 140142534709248, 140142543097855,
+STORE, 140142081728512, 140142098513919,
+SNULL, 140142786359296, 140142794747903,
+STORE, 140142794747904, 140142803140607,
+STORE, 140142786359296, 140142794747903,
+SNULL, 140142794751999, 140142803140607,
+STORE, 140142794747904, 140142794751999,
+STORE, 140142794752000, 140142803140607,
+STORE, 140142073335808, 140142098513919,
+SNULL, 140142073339903, 140142098513919,
+STORE, 140142073335808, 140142073339903,
+STORE, 140142073339904, 140142098513919,
+SNULL, 140142543101952, 140142551490559,
+STORE, 140142551490560, 140142559883263,
+STORE, 140142543101952, 140142551490559,
+SNULL, 140142551494655, 140142559883263,
+STORE, 140142551490560, 140142551494655,
+STORE, 140142551494656, 140142559883263,
+SNULL, 140142509527040, 140142517919743,
+STORE, 140142517919744, 140142534705151,
+STORE, 140142509527040, 140142517919743,
+SNULL, 140142517923839, 140142534705151,
+STORE, 140142517919744, 140142517923839,
+STORE, 140142517923840, 140142534705151,
+STORE, 140142064943104, 140142073335807,
+SNULL, 140142073339904, 140142090121215,
+STORE, 140142090121216, 140142098513919,
+STORE, 140142073339904, 140142090121215,
+SNULL, 140142090125311, 140142098513919,
+STORE, 140142090121216, 140142090125311,
+STORE, 140142090125312, 140142098513919,
+STORE, 140142056550400, 140142073335807,
+SNULL, 140142056554495, 140142073335807,
+STORE, 140142056550400, 140142056554495,
+STORE, 140142056554496, 140142073335807,
+STORE, 140142048157696, 140142056550399,
+SNULL, 140142509531135, 140142517919743,
+STORE, 140142509527040, 140142509531135,
+STORE, 140142509531136, 140142517919743,
+SNULL, 140142777966591, 140142786355199,
+STORE, 140142777962496, 140142777966591,
+STORE, 140142777966592, 140142786355199,
+SNULL, 140143046402047, 140143054790655,
+STORE, 140143046397952, 140143046402047,
+STORE, 140143046402048, 140143054790655,
+SNULL, 140142912184320, 140142920572927,
+STORE, 140142920572928, 140142928965631,
+STORE, 140142912184320, 140142920572927,
+SNULL, 140142920577023, 140142928965631,
+STORE, 140142920572928, 140142920577023,
+STORE, 140142920577024, 140142928965631,
+STORE, 140142039764992, 140142056550399,
+STORE, 140141955903488, 140141964296191,
+SNULL, 140142819930112, 140142828318719,
+STORE, 140142828318720, 140142836711423,
+STORE, 140142819930112, 140142828318719,
+SNULL, 140142828322815, 140142836711423,
+STORE, 140142828318720, 140142828322815,
+STORE, 140142828322816, 140142836711423,
+SNULL, 140142517923840, 140142526312447,
+STORE, 140142526312448, 140142534705151,
+STORE, 140142517923840, 140142526312447,
+SNULL, 140142526316543, 140142534705151,
+STORE, 140142526312448, 140142526316543,
+STORE, 140142526316544, 140142534705151,
+STORE, 140141947510784, 140141964296191,
+SNULL, 140142056554496, 140142064943103,
+STORE, 140142064943104, 140142073335807,
+STORE, 140142056554496, 140142064943103,
+SNULL, 140142064947199, 140142073335807,
+STORE, 140142064943104, 140142064947199,
+STORE, 140142064947200, 140142073335807,
+SNULL, 140142073339904, 140142081728511,
+STORE, 140142081728512, 140142090121215,
+STORE, 140142073339904, 140142081728511,
+SNULL, 140142081732607, 140142090121215,
+STORE, 140142081728512, 140142081732607,
+STORE, 140142081732608, 140142090121215,
+STORE, 140141939118080, 140141964296191,
+STORE, 140141930725376, 140141964296191,
+STORE, 140141922332672, 140141964296191,
+STORE, 140141913939968, 140141964296191,
+SNULL, 140141913939968, 140141922332671,
+STORE, 140141922332672, 140141964296191,
+STORE, 140141913939968, 140141922332671,
+SNULL, 140141922336767, 140141964296191,
+STORE, 140141922332672, 140141922336767,
+STORE, 140141922336768, 140141964296191,
+STORE, 140141905547264, 140141922332671,
+SNULL, 140141905551359, 140141922332671,
+STORE, 140141905547264, 140141905551359,
+STORE, 140141905551360, 140141922332671,
+STORE, 140141821685760, 140141830078463,
+STORE, 140141813293056, 140141830078463,
+STORE, 140141804900352, 140141830078463,
+STORE, 140141796507648, 140141830078463,
+SNULL, 140141796511743, 140141830078463,
+STORE, 140141796507648, 140141796511743,
+STORE, 140141796511744, 140141830078463,
+SNULL, 140141922336768, 140141955903487,
+STORE, 140141955903488, 140141964296191,
+STORE, 140141922336768, 140141955903487,
+SNULL, 140141955907583, 140141964296191,
+STORE, 140141955903488, 140141955907583,
+STORE, 140141955907584, 140141964296191,
+STORE, 140141788114944, 140141796507647,
+STORE, 140141779722240, 140141796507647,
+SNULL, 140141779722240, 140141788114943,
+STORE, 140141788114944, 140141796507647,
+STORE, 140141779722240, 140141788114943,
+SNULL, 140141788119039, 140141796507647,
+STORE, 140141788114944, 140141788119039,
+STORE, 140141788119040, 140141796507647,
+SNULL, 140141922336768, 140141947510783,
+STORE, 140141947510784, 140141955903487,
+STORE, 140141922336768, 140141947510783,
+SNULL, 140141947514879, 140141955903487,
+STORE, 140141947510784, 140141947514879,
+STORE, 140141947514880, 140141955903487,
+SNULL, 140142039764992, 140142048157695,
+STORE, 140142048157696, 140142056550399,
+STORE, 140142039764992, 140142048157695,
+SNULL, 140142048161791, 140142056550399,
+STORE, 140142048157696, 140142048161791,
+STORE, 140142048161792, 140142056550399,
+SNULL, 140142039769087, 140142048157695,
+STORE, 140142039764992, 140142039769087,
+STORE, 140142039769088, 140142048157695,
+SNULL, 140141796511744, 140141804900351,
+STORE, 140141804900352, 140141830078463,
+STORE, 140141796511744, 140141804900351,
+SNULL, 140141804904447, 140141830078463,
+STORE, 140141804900352, 140141804904447,
+STORE, 140141804904448, 140141830078463,
+STORE, 140141771329536, 140141788114943,
+STORE, 140141762936832, 140141788114943,
+STORE, 140141754544128, 140141788114943,
+SNULL, 140141804904448, 140141821685759,
+STORE, 140141821685760, 140141830078463,
+STORE, 140141804904448, 140141821685759,
+SNULL, 140141821689855, 140141830078463,
+STORE, 140141821685760, 140141821689855,
+STORE, 140141821689856, 140141830078463,
+SNULL, 140141922336768, 140141939118079,
+STORE, 140141939118080, 140141947510783,
+STORE, 140141922336768, 140141939118079,
+SNULL, 140141939122175, 140141947510783,
+STORE, 140141939118080, 140141939122175,
+STORE, 140141939122176, 140141947510783,
+SNULL, 140141905551360, 140141913939967,
+STORE, 140141913939968, 140141922332671,
+STORE, 140141905551360, 140141913939967,
+SNULL, 140141913944063, 140141922332671,
+STORE, 140141913939968, 140141913944063,
+STORE, 140141913944064, 140141922332671,
+STORE, 140141746151424, 140141788114943,
+STORE, 140141737758720, 140141788114943,
+SNULL, 140141804904448, 140141813293055,
+STORE, 140141813293056, 140141821685759,
+STORE, 140141804904448, 140141813293055,
+SNULL, 140141813297151, 140141821685759,
+STORE, 140141813293056, 140141813297151,
+STORE, 140141813297152, 140141821685759,
+STORE, 140141729366016, 140141788114943,
+STORE, 140141720973312, 140141788114943,
+STORE, 140141712580608, 140141788114943,
+SNULL, 140141712584703, 140141788114943,
+STORE, 140141712580608, 140141712584703,
+STORE, 140141712584704, 140141788114943,
+SNULL, 140141922336768, 140141930725375,
+STORE, 140141930725376, 140141939118079,
+STORE, 140141922336768, 140141930725375,
+SNULL, 140141930729471, 140141939118079,
+STORE, 140141930725376, 140141930729471,
+STORE, 140141930729472, 140141939118079,
+STORE, 140141704187904, 140141712580607,
+SNULL, 140141704191999, 140141712580607,
+STORE, 140141704187904, 140141704191999,
+STORE, 140141704192000, 140141712580607,
+STORE, 140141695795200, 140141704187903,
+STORE, 140141687402496, 140141704187903,
+SNULL, 140141712584704, 140141771329535,
+STORE, 140141771329536, 140141788114943,
+STORE, 140141712584704, 140141771329535,
+SNULL, 140141771333631, 140141788114943,
+STORE, 140141771329536, 140141771333631,
+STORE, 140141771333632, 140141788114943,
+SNULL, 140141771333632, 140141779722239,
+STORE, 140141779722240, 140141788114943,
+STORE, 140141771333632, 140141779722239,
+SNULL, 140141779726335, 140141788114943,
+STORE, 140141779722240, 140141779726335,
+STORE, 140141779726336, 140141788114943,
+STORE, 140141679009792, 140141704187903,
+SNULL, 140141679013887, 140141704187903,
+STORE, 140141679009792, 140141679013887,
+STORE, 140141679013888, 140141704187903,
+STORE, 140141670617088, 140141679009791,
+SNULL, 140141670621183, 140141679009791,
+STORE, 140141670617088, 140141670621183,
+STORE, 140141670621184, 140141679009791,
+STORE, 140141662224384, 140141670617087,
+SNULL, 140141712584704, 140141737758719,
+STORE, 140141737758720, 140141771329535,
+STORE, 140141712584704, 140141737758719,
+SNULL, 140141737762815, 140141771329535,
+STORE, 140141737758720, 140141737762815,
+STORE, 140141737762816, 140141771329535,
+SNULL, 140141712584704, 140141729366015,
+STORE, 140141729366016, 140141737758719,
+STORE, 140141712584704, 140141729366015,
+SNULL, 140141729370111, 140141737758719,
+STORE, 140141729366016, 140141729370111,
+STORE, 140141729370112, 140141737758719,
+SNULL, 140141737762816, 140141746151423,
+STORE, 140141746151424, 140141771329535,
+STORE, 140141737762816, 140141746151423,
+SNULL, 140141746155519, 140141771329535,
+STORE, 140141746151424, 140141746155519,
+STORE, 140141746155520, 140141771329535,
+STORE, 140141653831680, 140141670617087,
+SNULL, 140141746155520, 140141762936831,
+STORE, 140141762936832, 140141771329535,
+STORE, 140141746155520, 140141762936831,
+SNULL, 140141762940927, 140141771329535,
+STORE, 140141762936832, 140141762940927,
+STORE, 140141762940928, 140141771329535,
+STORE, 140141645438976, 140141670617087,
+SNULL, 140141645443071, 140141670617087,
+STORE, 140141645438976, 140141645443071,
+STORE, 140141645443072, 140141670617087,
+SNULL, 140141712584704, 140141720973311,
+STORE, 140141720973312, 140141729366015,
+STORE, 140141712584704, 140141720973311,
+SNULL, 140141720977407, 140141729366015,
+STORE, 140141720973312, 140141720977407,
+STORE, 140141720977408, 140141729366015,
+STORE, 140141637046272, 140141645438975,
+SNULL, 140141637050367, 140141645438975,
+STORE, 140141637046272, 140141637050367,
+STORE, 140141637050368, 140141645438975,
+STORE, 140141628653568, 140141637046271,
+SNULL, 140141628657663, 140141637046271,
+STORE, 140141628653568, 140141628657663,
+STORE, 140141628657664, 140141637046271,
+STORE, 140141620260864, 140141628653567,
+SNULL, 140141679013888, 140141687402495,
+STORE, 140141687402496, 140141704187903,
+STORE, 140141679013888, 140141687402495,
+SNULL, 140141687406591, 140141704187903,
+STORE, 140141687402496, 140141687406591,
+STORE, 140141687406592, 140141704187903,
+SNULL, 140141746155520, 140141754544127,
+STORE, 140141754544128, 140141762936831,
+STORE, 140141746155520, 140141754544127,
+SNULL, 140141754548223, 140141762936831,
+STORE, 140141754544128, 140141754548223,
+STORE, 140141754548224, 140141762936831,
+SNULL, 140141687406592, 140141695795199,
+STORE, 140141695795200, 140141704187903,
+STORE, 140141687406592, 140141695795199,
+SNULL, 140141695799295, 140141704187903,
+STORE, 140141695795200, 140141695799295,
+STORE, 140141695799296, 140141704187903,
+STORE, 140141611868160, 140141628653567,
+SNULL, 140141611872255, 140141628653567,
+STORE, 140141611868160, 140141611872255,
+STORE, 140141611872256, 140141628653567,
+SNULL, 140141645443072, 140141662224383,
+STORE, 140141662224384, 140141670617087,
+STORE, 140141645443072, 140141662224383,
+SNULL, 140141662228479, 140141670617087,
+STORE, 140141662224384, 140141662228479,
+STORE, 140141662228480, 140141670617087,
+STORE, 140141603475456, 140141611868159,
+SNULL, 140141603479551, 140141611868159,
+STORE, 140141603475456, 140141603479551,
+STORE, 140141603479552, 140141611868159,
+STORE, 140141595082752, 140141603475455,
+SNULL, 140141645443072, 140141653831679,
+STORE, 140141653831680, 140141662224383,
+STORE, 140141645443072, 140141653831679,
+SNULL, 140141653835775, 140141662224383,
+STORE, 140141653831680, 140141653835775,
+STORE, 140141653835776, 140141662224383,
+STORE, 140141586690048, 140141603475455,
+SNULL, 140141611872256, 140141620260863,
+STORE, 140141620260864, 140141628653567,
+STORE, 140141611872256, 140141620260863,
+SNULL, 140141620264959, 140141628653567,
+STORE, 140141620260864, 140141620264959,
+STORE, 140141620264960, 140141628653567,
+SNULL, 140141586690048, 140141595082751,
+STORE, 140141595082752, 140141603475455,
+STORE, 140141586690048, 140141595082751,
+SNULL, 140141595086847, 140141603475455,
+STORE, 140141595082752, 140141595086847,
+STORE, 140141595086848, 140141603475455,
+STORE, 140141578297344, 140141595082751,
+SNULL, 140141578301439, 140141595082751,
+STORE, 140141578297344, 140141578301439,
+STORE, 140141578301440, 140141595082751,
+SNULL, 140141578301440, 140141586690047,
+STORE, 140141586690048, 140141595082751,
+STORE, 140141578301440, 140141586690047,
+SNULL, 140141586694143, 140141595082751,
+STORE, 140141586690048, 140141586694143,
+STORE, 140141586694144, 140141595082751,
+STORE, 140143370027008, 140143370055679,
+STORE, 140143309254656, 140143311446015,
+SNULL, 140143309254656, 140143309344767,
+STORE, 140143309344768, 140143311446015,
+STORE, 140143309254656, 140143309344767,
+SNULL, 140143311437823, 140143311446015,
+STORE, 140143309344768, 140143311437823,
+STORE, 140143311437824, 140143311446015,
+ERASE, 140143311437824, 140143311446015,
+STORE, 140143311437824, 140143311446015,
+SNULL, 140143311441919, 140143311446015,
+STORE, 140143311437824, 140143311441919,
+STORE, 140143311441920, 140143311446015,
+ERASE, 140143370027008, 140143370055679,
+ERASE, 140142912180224, 140142912184319,
+ERASE, 140142912184320, 140142920572927,
+ERASE, 140142945751040, 140142945755135,
+ERASE, 140142945755136, 140142954143743,
+ERASE, 140142090121216, 140142090125311,
+ERASE, 140142090125312, 140142098513919,
+ERASE, 140142794747904, 140142794751999,
+ERASE, 140142794752000, 140142803140607,
+ERASE, 140141913939968, 140141913944063,
+ERASE, 140141913944064, 140141922332671,
+ERASE, 140141746151424, 140141746155519,
+ERASE, 140141746155520, 140141754544127,
+ERASE, 140142954143744, 140142954147839,
+ERASE, 140142954147840, 140142962536447,
+ERASE, 140142081728512, 140142081732607,
+ERASE, 140142081732608, 140142090121215,
+ERASE, 140141905547264, 140141905551359,
+ERASE, 140141905551360, 140141913939967,
+ERASE, 140141729366016, 140141729370111,
+ERASE, 140141729370112, 140141737758719,
+ERASE, 140142920572928, 140142920577023,
+ERASE, 140142920577024, 140142928965631,
+ERASE, 140142039764992, 140142039769087,
+ERASE, 140142039769088, 140142048157695,
+ERASE, 140141679009792, 140141679013887,
+ERASE, 140141679013888, 140141687402495,
+ERASE, 140142551490560, 140142551494655,
+ERASE, 140142551494656, 140142559883263,
+ERASE, 140141947510784, 140141947514879,
+ERASE, 140141947514880, 140141955903487,
+ERASE, 140141771329536, 140141771333631,
+ERASE, 140141771333632, 140141779722239,
+ERASE, 140142928965632, 140142928969727,
+ERASE, 140142928969728, 140142937358335,
+ERASE, 140142073335808, 140142073339903,
+ERASE, 140142073339904, 140142081728511,
+ERASE, 140142543097856, 140142543101951,
+ERASE, 140142543101952, 140142551490559,
+ERASE, 140141955903488, 140141955907583,
+ERASE, 140141955907584, 140141964296191,
+ERASE, 140141704187904, 140141704191999,
+ERASE, 140141704192000, 140141712580607,
+ERASE, 140142786355200, 140142786359295,
+ERASE, 140142786359296, 140142794747903,
+ERASE, 140142056550400, 140142056554495,
+ERASE, 140142056554496, 140142064943103,
+ERASE, 140142828318720, 140142828322815,
+ERASE, 140142828322816, 140142836711423,
+ERASE, 140141788114944, 140141788119039,
+ERASE, 140141788119040, 140141796507647,
+ERASE, 140141695795200, 140141695799295,
+ERASE, 140141695799296, 140141704187903,
+ERASE, 140141578297344, 140141578301439,
+ERASE, 140141578301440, 140141586690047,
+ERASE, 140141611868160, 140141611872255,
+ERASE, 140141611872256, 140141620260863,
+ERASE, 140142811533312, 140142811537407,
+ERASE, 140142811537408, 140142819926015,
+ERASE, 140142064943104, 140142064947199,
+ERASE, 140142064947200, 140142073335807,
+ERASE, 140141628653568, 140141628657663,
+ERASE, 140141628657664, 140141637046271,
+ERASE, 140143046397952, 140143046402047,
+ERASE, 140143046402048, 140143054790655,
+ERASE, 140141796507648, 140141796511743,
+ERASE, 140141796511744, 140141804900351,
+ERASE, 140142803140608, 140142803144703,
+ERASE, 140142803144704, 140142811533311,
+ERASE, 140142509527040, 140142509531135,
+ERASE, 140142509531136, 140142517919743,
+ERASE, 140141821685760, 140141821689855,
+ERASE, 140141821689856, 140141830078463,
+ERASE, 140142777962496, 140142777966591,
+ERASE, 140142777966592, 140142786355199,
+ERASE, 140141804900352, 140141804904447,
+ERASE, 140141804904448, 140141813293055,
+ERASE, 140141930725376, 140141930729471,
+ERASE, 140141930729472, 140141939118079,
+ERASE, 140142937358336, 140142937362431,
+ERASE, 140142937362432, 140142945751039,
+ERASE, 140142559883264, 140142559887359,
+ERASE, 140142559887360, 140142568275967,
+ERASE, 140142534705152, 140142534709247,
+ERASE, 140142534709248, 140142543097855,
+ERASE, 140142048157696, 140142048161791,
+ERASE, 140142048161792, 140142056550399,
+ERASE, 140141754544128, 140141754548223,
+ERASE, 140141754548224, 140141762936831,
+ERASE, 140141939118080, 140141939122175,
+ERASE, 140141939122176, 140141947510783,
+ERASE, 140141653831680, 140141653835775,
+ERASE, 140141653835776, 140141662224383,
+ERASE, 140141712580608, 140141712584703,
+ERASE, 140141712584704, 140141720973311,
+ERASE, 140141645438976, 140141645443071,
+ERASE, 140141645443072, 140141653831679,
+ERASE, 140141687402496, 140141687406591,
+ERASE, 140141687406592, 140141695795199,
+ERASE, 140141662224384, 140141662228479,
+ERASE, 140141662228480, 140141670617087,
+ERASE, 140141922332672, 140141922336767,
+ERASE, 140141922336768, 140141930725375,
+ERASE, 140141737758720, 140141737762815,
+ERASE, 140141737762816, 140141746151423,
+ERASE, 140141637046272, 140141637050367,
+ERASE, 140141637050368, 140141645438975,
+ERASE, 140142517919744, 140142517923839,
+ERASE, 140142517923840, 140142526312447,
+ERASE, 140143096754176, 140143096758271,
+ERASE, 140143096758272, 140143105146879,
+ERASE, 140141595082752, 140141595086847,
+ERASE, 140141595086848, 140141603475455,
+ERASE, 140141762936832, 140141762940927,
+ERASE, 140141762940928, 140141771329535,
+ERASE, 140143311446016, 140143311450111,
+ERASE, 140143311450112, 140143319838719,
+ERASE, 140142526312448, 140142526316543,
+ERASE, 140142526316544, 140142534705151,
+ERASE, 140142819926016, 140142819930111,
+ERASE, 140142819930112, 140142828318719,
+ERASE, 140143180615680, 140143180619775,
+ERASE, 140143180619776, 140143189008383,
+ERASE, 140142962536448, 140142962540543,
+ERASE, 140142962540544, 140142970929151,
+ERASE, 140143214186496, 140143214190591,
+ERASE, 140143214190592, 140143222579199,
+ERASE, 140143088361472, 140143088365567,
+ERASE, 140143088365568, 140143096754175,
+ERASE, 140141586690048, 140141586694143,
+ERASE, 140141586694144, 140141595082751,
+ERASE, 140143230971904, 140143230975999,
+ERASE, 140143230976000, 140143239364607,
+ERASE, 140141779722240, 140141779726335,
+ERASE, 140141779726336, 140141788114943,
+ERASE, 140141670617088, 140141670621183,
+ERASE, 140141670621184, 140141679009791,
+ERASE, 140141813293056, 140141813297151,
+ERASE, 140141813297152, 140141821685759,
+ERASE, 140143222579200, 140143222583295,
+ERASE, 140143222583296, 140143230971903,
+ERASE, 140143189008384, 140143189012479,
+ERASE, 140143189012480, 140143197401087,
+ERASE, 140143071576064, 140143071580159,
+ERASE, 140143071580160, 140143079968767,
+ERASE, 140141620260864, 140141620264959,
+ERASE, 140141620264960, 140141628653567,
+ERASE, 140141603475456, 140141603479551,
+ERASE, 140141603479552, 140141611868159,
+ERASE, 140141720973312, 140141720977407,
+ERASE, 140141720977408, 140141729366015,
+ERASE, 140143079968768, 140143079972863,
+ERASE, 140143079972864, 140143088361471,
+ERASE, 140143205793792, 140143205797887,
+ERASE, 140143205797888, 140143214186495,
+ };
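+/*
+ * Note: each set below appears to be a recorded trace of VMA-range
+ * operations to be replayed against the maple tree, laid out as flat
+ * triples of (operation, range start, range end). Assuming the usual
+ * convention for these test sets, STORE writes an entry covering the
+ * range, SNULL stores a NULL entry over it, and ERASE removes it; the
+ * addresses are kept verbatim as captured from the originating workload.
+ */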
+ unsigned long set30[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140733436743680, 140737488351231,
+SNULL, 140733436747775, 140737488351231,
+STORE, 140733436743680, 140733436747775,
+STORE, 140733436612608, 140733436747775,
+STORE, 94630728904704, 94630731157503,
+SNULL, 94630729035775, 94630731157503,
+STORE, 94630728904704, 94630729035775,
+STORE, 94630729035776, 94630731157503,
+ERASE, 94630729035776, 94630731157503,
+STORE, 94630731128832, 94630731137023,
+STORE, 94630731137024, 94630731157503,
+STORE, 140165750841344, 140165753094143,
+SNULL, 140165750984703, 140165753094143,
+STORE, 140165750841344, 140165750984703,
+STORE, 140165750984704, 140165753094143,
+ERASE, 140165750984704, 140165753094143,
+STORE, 140165753081856, 140165753090047,
+STORE, 140165753090048, 140165753094143,
+STORE, 140733436887040, 140733436891135,
+STORE, 140733436874752, 140733436887039,
+STORE, 140165753053184, 140165753081855,
+STORE, 140165753044992, 140165753053183,
+STORE, 140165748625408, 140165750841343,
+SNULL, 140165748625408, 140165748723711,
+STORE, 140165748723712, 140165750841343,
+STORE, 140165748625408, 140165748723711,
+SNULL, 140165750816767, 140165750841343,
+STORE, 140165748723712, 140165750816767,
+STORE, 140165750816768, 140165750841343,
+SNULL, 140165750816768, 140165750824959,
+STORE, 140165750824960, 140165750841343,
+STORE, 140165750816768, 140165750824959,
+ERASE, 140165750816768, 140165750824959,
+STORE, 140165750816768, 140165750824959,
+ERASE, 140165750824960, 140165750841343,
+STORE, 140165750824960, 140165750841343,
+STORE, 140165744828416, 140165748625407,
+SNULL, 140165744828416, 140165746487295,
+STORE, 140165746487296, 140165748625407,
+STORE, 140165744828416, 140165746487295,
+SNULL, 140165748584447, 140165748625407,
+STORE, 140165746487296, 140165748584447,
+STORE, 140165748584448, 140165748625407,
+SNULL, 140165748584448, 140165748609023,
+STORE, 140165748609024, 140165748625407,
+STORE, 140165748584448, 140165748609023,
+ERASE, 140165748584448, 140165748609023,
+STORE, 140165748584448, 140165748609023,
+ERASE, 140165748609024, 140165748625407,
+STORE, 140165748609024, 140165748625407,
+STORE, 140165753036800, 140165753053183,
+SNULL, 140165748600831, 140165748609023,
+STORE, 140165748584448, 140165748600831,
+STORE, 140165748600832, 140165748609023,
+SNULL, 140165750820863, 140165750824959,
+STORE, 140165750816768, 140165750820863,
+STORE, 140165750820864, 140165750824959,
+SNULL, 94630731132927, 94630731137023,
+STORE, 94630731128832, 94630731132927,
+STORE, 94630731132928, 94630731137023,
+SNULL, 140165753085951, 140165753090047,
+STORE, 140165753081856, 140165753085951,
+STORE, 140165753085952, 140165753090047,
+ERASE, 140165753053184, 140165753081855,
+STORE, 94630743547904, 94630743683071,
+STORE, 140165736435712, 140165744828415,
+SNULL, 140165736439807, 140165744828415,
+STORE, 140165736435712, 140165736439807,
+STORE, 140165736439808, 140165744828415,
+STORE, 140165728043008, 140165736435711,
+STORE, 140165593825280, 140165728043007,
+SNULL, 140165593825280, 140165653725183,
+STORE, 140165653725184, 140165728043007,
+STORE, 140165593825280, 140165653725183,
+ERASE, 140165593825280, 140165653725183,
+SNULL, 140165720834047, 140165728043007,
+STORE, 140165653725184, 140165720834047,
+STORE, 140165720834048, 140165728043007,
+ERASE, 140165720834048, 140165728043007,
+SNULL, 140165653860351, 140165720834047,
+STORE, 140165653725184, 140165653860351,
+STORE, 140165653860352, 140165720834047,
+SNULL, 140165728047103, 140165736435711,
+STORE, 140165728043008, 140165728047103,
+STORE, 140165728047104, 140165736435711,
+STORE, 140165645332480, 140165653725183,
+SNULL, 140165645336575, 140165653725183,
+STORE, 140165645332480, 140165645336575,
+STORE, 140165645336576, 140165653725183,
+STORE, 140165636939776, 140165645332479,
+SNULL, 140165636943871, 140165645332479,
+STORE, 140165636939776, 140165636943871,
+STORE, 140165636943872, 140165645332479,
+STORE, 140165628547072, 140165636939775,
+SNULL, 140165628551167, 140165636939775,
+STORE, 140165628547072, 140165628551167,
+STORE, 140165628551168, 140165636939775,
+STORE, 140165620154368, 140165628547071,
+STORE, 140165611761664, 140165628547071,
+STORE, 140165603368960, 140165628547071,
+STORE, 140165469151232, 140165603368959,
+SNULL, 140165469151232, 140165519507455,
+STORE, 140165519507456, 140165603368959,
+STORE, 140165469151232, 140165519507455,
+ERASE, 140165469151232, 140165519507455,
+SNULL, 140165586616319, 140165603368959,
+STORE, 140165519507456, 140165586616319,
+STORE, 140165586616320, 140165603368959,
+ERASE, 140165586616320, 140165603368959,
+STORE, 140165594976256, 140165628547071,
+STORE, 140165385289728, 140165586616319,
+SNULL, 140165452398591, 140165586616319,
+STORE, 140165385289728, 140165452398591,
+STORE, 140165452398592, 140165586616319,
+SNULL, 140165452398592, 140165519507455,
+STORE, 140165519507456, 140165586616319,
+STORE, 140165452398592, 140165519507455,
+ERASE, 140165452398592, 140165519507455,
+STORE, 140165251072000, 140165452398591,
+SNULL, 140165318180863, 140165452398591,
+STORE, 140165251072000, 140165318180863,
+STORE, 140165318180864, 140165452398591,
+SNULL, 140165318180864, 140165385289727,
+STORE, 140165385289728, 140165452398591,
+STORE, 140165318180864, 140165385289727,
+ERASE, 140165318180864, 140165385289727,
+SNULL, 140165519642623, 140165586616319,
+STORE, 140165519507456, 140165519642623,
+STORE, 140165519642624, 140165586616319,
+SNULL, 140165594976256, 140165611761663,
+STORE, 140165611761664, 140165628547071,
+STORE, 140165594976256, 140165611761663,
+SNULL, 140165611765759, 140165628547071,
+STORE, 140165611761664, 140165611765759,
+STORE, 140165611765760, 140165628547071,
+STORE, 140165385289728, 140165519507455,
+SNULL, 140165385424895, 140165519507455,
+STORE, 140165385289728, 140165385424895,
+STORE, 140165385424896, 140165519507455,
+SNULL, 140165594976256, 140165603368959,
+STORE, 140165603368960, 140165611761663,
+STORE, 140165594976256, 140165603368959,
+SNULL, 140165603373055, 140165611761663,
+STORE, 140165603368960, 140165603373055,
+STORE, 140165603373056, 140165611761663,
+SNULL, 140165251207167, 140165318180863,
+STORE, 140165251072000, 140165251207167,
+STORE, 140165251207168, 140165318180863,
+STORE, 140165376897024, 140165385289727,
+SNULL, 140165376901119, 140165385289727,
+STORE, 140165376897024, 140165376901119,
+STORE, 140165376901120, 140165385289727,
+SNULL, 140165385424896, 140165452398591,
+STORE, 140165452398592, 140165519507455,
+STORE, 140165385424896, 140165452398591,
+SNULL, 140165452533759, 140165519507455,
+STORE, 140165452398592, 140165452533759,
+STORE, 140165452533760, 140165519507455,
+STORE, 140165368504320, 140165376897023,
+SNULL, 140165594980351, 140165603368959,
+STORE, 140165594976256, 140165594980351,
+STORE, 140165594980352, 140165603368959,
+SNULL, 140165368508415, 140165376897023,
+STORE, 140165368504320, 140165368508415,
+STORE, 140165368508416, 140165376897023,
+SNULL, 140165611765760, 140165620154367,
+STORE, 140165620154368, 140165628547071,
+STORE, 140165611765760, 140165620154367,
+SNULL, 140165620158463, 140165628547071,
+STORE, 140165620154368, 140165620158463,
+STORE, 140165620158464, 140165628547071,
+STORE, 140165360111616, 140165368504319,
+STORE, 140165351718912, 140165368504319,
+STORE, 140165343326208, 140165368504319,
+SNULL, 140165343326208, 140165351718911,
+STORE, 140165351718912, 140165368504319,
+STORE, 140165343326208, 140165351718911,
+SNULL, 140165351723007, 140165368504319,
+STORE, 140165351718912, 140165351723007,
+STORE, 140165351723008, 140165368504319,
+SNULL, 140165343330303, 140165351718911,
+STORE, 140165343326208, 140165343330303,
+STORE, 140165343330304, 140165351718911,
+SNULL, 140165351723008, 140165360111615,
+STORE, 140165360111616, 140165368504319,
+STORE, 140165351723008, 140165360111615,
+SNULL, 140165360115711, 140165368504319,
+STORE, 140165360111616, 140165360115711,
+STORE, 140165360115712, 140165368504319,
+STORE, 140165334933504, 140165343326207,
+SNULL, 140165334937599, 140165343326207,
+STORE, 140165334933504, 140165334937599,
+STORE, 140165334937600, 140165343326207,
+STORE, 140165326540800, 140165334933503,
+STORE, 140165242679296, 140165251071999,
+SNULL, 140165242683391, 140165251071999,
+STORE, 140165242679296, 140165242683391,
+STORE, 140165242683392, 140165251071999,
+STORE, 140165234286592, 140165242679295,
+STORE, 140165225893888, 140165242679295,
+SNULL, 140165225897983, 140165242679295,
+STORE, 140165225893888, 140165225897983,
+STORE, 140165225897984, 140165242679295,
+SNULL, 140165225897984, 140165234286591,
+STORE, 140165234286592, 140165242679295,
+STORE, 140165225897984, 140165234286591,
+SNULL, 140165234290687, 140165242679295,
+STORE, 140165234286592, 140165234290687,
+STORE, 140165234290688, 140165242679295,
+SNULL, 140165326544895, 140165334933503,
+STORE, 140165326540800, 140165326544895,
+STORE, 140165326544896, 140165334933503,
+STORE, 140165217501184, 140165225893887,
+STORE, 140165209108480, 140165225893887,
+SNULL, 140165209108480, 140165217501183,
+STORE, 140165217501184, 140165225893887,
+STORE, 140165209108480, 140165217501183,
+SNULL, 140165217505279, 140165225893887,
+STORE, 140165217501184, 140165217505279,
+STORE, 140165217505280, 140165225893887,
+SNULL, 140165209112575, 140165217501183,
+STORE, 140165209108480, 140165209112575,
+STORE, 140165209112576, 140165217501183,
+STORE, 140165200715776, 140165209108479,
+STORE, 140165066498048, 140165200715775,
+SNULL, 140165066498048, 140165116854271,
+STORE, 140165116854272, 140165200715775,
+STORE, 140165066498048, 140165116854271,
+ERASE, 140165066498048, 140165116854271,
+SNULL, 140165183963135, 140165200715775,
+STORE, 140165116854272, 140165183963135,
+STORE, 140165183963136, 140165200715775,
+ERASE, 140165183963136, 140165200715775,
+SNULL, 140165116989439, 140165183963135,
+STORE, 140165116854272, 140165116989439,
+STORE, 140165116989440, 140165183963135,
+STORE, 140165192323072, 140165209108479,
+STORE, 140165108461568, 140165116854271,
+STORE, 140164974243840, 140165108461567,
+STORE, 140164965851136, 140164974243839,
+SNULL, 140164974243840, 140164982636543,
+STORE, 140164982636544, 140165108461567,
+STORE, 140164974243840, 140164982636543,
+ERASE, 140164974243840, 140164982636543,
+STORE, 140164965851136, 140164982636543,
+STORE, 140164957458432, 140164982636543,
+STORE, 140164949065728, 140164982636543,
+STORE, 140164940673024, 140164982636543,
+STORE, 140164806455296, 140164940673023,
+STORE, 140164798062592, 140164806455295,
+STORE, 140164789669888, 140164806455295,
+STORE, 140164655452160, 140164789669887,
+STORE, 140164647059456, 140164655452159,
+STORE, 140164638666752, 140164655452159,
+SNULL, 140164655452160, 140164714201087,
+STORE, 140164714201088, 140164789669887,
+STORE, 140164655452160, 140164714201087,
+ERASE, 140164655452160, 140164714201087,
+STORE, 140164705808384, 140164714201087,
+STORE, 140164697415680, 140164714201087,
+STORE, 140164504449024, 140164638666751,
+SNULL, 140164504449024, 140164512874495,
+STORE, 140164512874496, 140164638666751,
+STORE, 140164504449024, 140164512874495,
+ERASE, 140164504449024, 140164512874495,
+STORE, 140164689022976, 140164714201087,
+STORE, 140164680630272, 140164714201087,
+SNULL, 140164680634367, 140164714201087,
+STORE, 140164680630272, 140164680634367,
+STORE, 140164680634368, 140164714201087,
+STORE, 140164378656768, 140164638666751,
+SNULL, 140165192323072, 140165200715775,
+STORE, 140165200715776, 140165209108479,
+STORE, 140165192323072, 140165200715775,
+SNULL, 140165200719871, 140165209108479,
+STORE, 140165200715776, 140165200719871,
+STORE, 140165200719872, 140165209108479,
+SNULL, 140165049745407, 140165108461567,
+STORE, 140164982636544, 140165049745407,
+STORE, 140165049745408, 140165108461567,
+ERASE, 140165049745408, 140165108461567,
+SNULL, 140164982771711, 140165049745407,
+STORE, 140164982636544, 140164982771711,
+STORE, 140164982771712, 140165049745407,
+STORE, 140164244439040, 140164638666751,
+SNULL, 140164311547903, 140164638666751,
+STORE, 140164244439040, 140164311547903,
+STORE, 140164311547904, 140164638666751,
+SNULL, 140164311547904, 140164378656767,
+STORE, 140164378656768, 140164638666751,
+STORE, 140164311547904, 140164378656767,
+ERASE, 140164311547904, 140164378656767,
+SNULL, 140164806455296, 140164848418815,
+STORE, 140164848418816, 140164940673023,
+STORE, 140164806455296, 140164848418815,
+ERASE, 140164806455296, 140164848418815,
+SNULL, 140164915527679, 140164940673023,
+STORE, 140164848418816, 140164915527679,
+STORE, 140164915527680, 140164940673023,
+ERASE, 140164915527680, 140164940673023,
+STORE, 140164110221312, 140164311547903,
+SNULL, 140164177330175, 140164311547903,
+STORE, 140164110221312, 140164177330175,
+STORE, 140164177330176, 140164311547903,
+SNULL, 140164177330176, 140164244439039,
+STORE, 140164244439040, 140164311547903,
+STORE, 140164177330176, 140164244439039,
+ERASE, 140164177330176, 140164244439039,
+SNULL, 140164781309951, 140164789669887,
+STORE, 140164714201088, 140164781309951,
+STORE, 140164781309952, 140164789669887,
+ERASE, 140164781309952, 140164789669887,
+STORE, 140163976003584, 140164177330175,
+SNULL, 140164043112447, 140164177330175,
+STORE, 140163976003584, 140164043112447,
+STORE, 140164043112448, 140164177330175,
+SNULL, 140164043112448, 140164110221311,
+STORE, 140164110221312, 140164177330175,
+STORE, 140164043112448, 140164110221311,
+ERASE, 140164043112448, 140164110221311,
+SNULL, 140164579983359, 140164638666751,
+STORE, 140164378656768, 140164579983359,
+STORE, 140164579983360, 140164638666751,
+ERASE, 140164579983360, 140164638666751,
+STORE, 140163841785856, 140164043112447,
+SNULL, 140163908894719, 140164043112447,
+STORE, 140163841785856, 140163908894719,
+STORE, 140163908894720, 140164043112447,
+SNULL, 140163908894720, 140163976003583,
+STORE, 140163976003584, 140164043112447,
+STORE, 140163908894720, 140163976003583,
+ERASE, 140163908894720, 140163976003583,
+SNULL, 140164940673024, 140164965851135,
+STORE, 140164965851136, 140164982636543,
+STORE, 140164940673024, 140164965851135,
+SNULL, 140164965855231, 140164982636543,
+STORE, 140164965851136, 140164965855231,
+STORE, 140164965855232, 140164982636543,
+SNULL, 140164965855232, 140164974243839,
+STORE, 140164974243840, 140164982636543,
+STORE, 140164965855232, 140164974243839,
+SNULL, 140164974247935, 140164982636543,
+STORE, 140164974243840, 140164974247935,
+STORE, 140164974247936, 140164982636543,
+SNULL, 140164445765631, 140164579983359,
+STORE, 140164378656768, 140164445765631,
+STORE, 140164445765632, 140164579983359,
+SNULL, 140164445765632, 140164512874495,
+STORE, 140164512874496, 140164579983359,
+STORE, 140164445765632, 140164512874495,
+ERASE, 140164445765632, 140164512874495,
+SNULL, 140164378791935, 140164445765631,
+STORE, 140164378656768, 140164378791935,
+STORE, 140164378791936, 140164445765631,
+SNULL, 140164789673983, 140164806455295,
+STORE, 140164789669888, 140164789673983,
+STORE, 140164789673984, 140164806455295,
+SNULL, 140164789673984, 140164798062591,
+STORE, 140164798062592, 140164806455295,
+STORE, 140164789673984, 140164798062591,
+SNULL, 140164798066687, 140164806455295,
+STORE, 140164798062592, 140164798066687,
+STORE, 140164798066688, 140164806455295,
+SNULL, 140164638670847, 140164655452159,
+STORE, 140164638666752, 140164638670847,
+STORE, 140164638670848, 140164655452159,
+STORE, 140165100068864, 140165116854271,
+STORE, 140165091676160, 140165116854271,
+STORE, 140165083283456, 140165116854271,
+SNULL, 140164244574207, 140164311547903,
+STORE, 140164244439040, 140164244574207,
+STORE, 140164244574208, 140164311547903,
+SNULL, 140164848553983, 140164915527679,
+STORE, 140164848418816, 140164848553983,
+STORE, 140164848553984, 140164915527679,
+SNULL, 140164110356479, 140164177330175,
+STORE, 140164110221312, 140164110356479,
+STORE, 140164110356480, 140164177330175,
+SNULL, 140164714336255, 140164781309951,
+STORE, 140164714201088, 140164714336255,
+STORE, 140164714336256, 140164781309951,
+SNULL, 140163976138751, 140164043112447,
+STORE, 140163976003584, 140163976138751,
+STORE, 140163976138752, 140164043112447,
+SNULL, 140164513009663, 140164579983359,
+STORE, 140164512874496, 140164513009663,
+STORE, 140164513009664, 140164579983359,
+SNULL, 140163841921023, 140163908894719,
+STORE, 140163841785856, 140163841921023,
+STORE, 140163841921024, 140163908894719,
+SNULL, 140165083283456, 140165100068863,
+STORE, 140165100068864, 140165116854271,
+STORE, 140165083283456, 140165100068863,
+SNULL, 140165100072959, 140165116854271,
+STORE, 140165100068864, 140165100072959,
+STORE, 140165100072960, 140165116854271,
+SNULL, 140165100072960, 140165108461567,
+STORE, 140165108461568, 140165116854271,
+STORE, 140165100072960, 140165108461567,
+SNULL, 140165108465663, 140165116854271,
+STORE, 140165108461568, 140165108465663,
+STORE, 140165108465664, 140165116854271,
+STORE, 140165074890752, 140165100068863,
+SNULL, 140165074894847, 140165100068863,
+STORE, 140165074890752, 140165074894847,
+STORE, 140165074894848, 140165100068863,
+STORE, 140165066498048, 140165074890751,
+STORE, 140165058105344, 140165074890751,
+STORE, 140164932280320, 140164965851135,
+SNULL, 140165192327167, 140165200715775,
+STORE, 140165192323072, 140165192327167,
+STORE, 140165192327168, 140165200715775,
+STORE, 140164923887616, 140164965851135,
+SNULL, 140164923891711, 140164965851135,
+STORE, 140164923887616, 140164923891711,
+STORE, 140164923891712, 140164965851135,
+SNULL, 140164680634368, 140164705808383,
+STORE, 140164705808384, 140164714201087,
+STORE, 140164680634368, 140164705808383,
+SNULL, 140164705812479, 140164714201087,
+STORE, 140164705808384, 140164705812479,
+STORE, 140164705812480, 140164714201087,
+SNULL, 140164680634368, 140164697415679,
+STORE, 140164697415680, 140164705808383,
+STORE, 140164680634368, 140164697415679,
+SNULL, 140164697419775, 140164705808383,
+STORE, 140164697415680, 140164697419775,
+STORE, 140164697419776, 140164705808383,
+STORE, 140164840026112, 140164848418815,
+STORE, 140164831633408, 140164848418815,
+STORE, 140164823240704, 140164848418815,
+SNULL, 140165074894848, 140165083283455,
+STORE, 140165083283456, 140165100068863,
+STORE, 140165074894848, 140165083283455,
+SNULL, 140165083287551, 140165100068863,
+STORE, 140165083283456, 140165083287551,
+STORE, 140165083287552, 140165100068863,
+SNULL, 140165083287552, 140165091676159,
+STORE, 140165091676160, 140165100068863,
+STORE, 140165083287552, 140165091676159,
+SNULL, 140165091680255, 140165100068863,
+STORE, 140165091676160, 140165091680255,
+STORE, 140165091680256, 140165100068863,
+SNULL, 140164638670848, 140164647059455,
+STORE, 140164647059456, 140164655452159,
+STORE, 140164638670848, 140164647059455,
+SNULL, 140164647063551, 140164655452159,
+STORE, 140164647059456, 140164647063551,
+STORE, 140164647063552, 140164655452159,
+SNULL, 140164923891712, 140164940673023,
+STORE, 140164940673024, 140164965851135,
+STORE, 140164923891712, 140164940673023,
+SNULL, 140164940677119, 140164965851135,
+STORE, 140164940673024, 140164940677119,
+STORE, 140164940677120, 140164965851135,
+SNULL, 140164940677120, 140164949065727,
+STORE, 140164949065728, 140164965851135,
+STORE, 140164940677120, 140164949065727,
+SNULL, 140164949069823, 140164965851135,
+STORE, 140164949065728, 140164949069823,
+STORE, 140164949069824, 140164965851135,
+SNULL, 140164949069824, 140164957458431,
+STORE, 140164957458432, 140164965851135,
+STORE, 140164949069824, 140164957458431,
+SNULL, 140164957462527, 140164965851135,
+STORE, 140164957458432, 140164957462527,
+STORE, 140164957462528, 140164965851135,
+SNULL, 140164680634368, 140164689022975,
+STORE, 140164689022976, 140164697415679,
+STORE, 140164680634368, 140164689022975,
+SNULL, 140164689027071, 140164697415679,
+STORE, 140164689022976, 140164689027071,
+STORE, 140164689027072, 140164697415679,
+STORE, 140164814848000, 140164848418815,
+SNULL, 140165058105344, 140165066498047,
+STORE, 140165066498048, 140165074890751,
+STORE, 140165058105344, 140165066498047,
+SNULL, 140165066502143, 140165074890751,
+STORE, 140165066498048, 140165066502143,
+STORE, 140165066502144, 140165074890751,
+SNULL, 140165058109439, 140165066498047,
+STORE, 140165058105344, 140165058109439,
+STORE, 140165058109440, 140165066498047,
+STORE, 140164798066688, 140164814847999,
+SNULL, 140164798066688, 140164806455295,
+STORE, 140164806455296, 140164814847999,
+STORE, 140164798066688, 140164806455295,
+SNULL, 140164806459391, 140164814847999,
+STORE, 140164806455296, 140164806459391,
+STORE, 140164806459392, 140164814847999,
+SNULL, 140164923891712, 140164932280319,
+STORE, 140164932280320, 140164940673023,
+STORE, 140164923891712, 140164932280319,
+SNULL, 140164932284415, 140164940673023,
+STORE, 140164932280320, 140164932284415,
+STORE, 140164932284416, 140164940673023,
+STORE, 140164672237568, 140164680630271,
+STORE, 140164663844864, 140164680630271,
+STORE, 140164647063552, 140164680630271,
+SNULL, 140164647063552, 140164655452159,
+STORE, 140164655452160, 140164680630271,
+STORE, 140164647063552, 140164655452159,
+SNULL, 140164655456255, 140164680630271,
+STORE, 140164655452160, 140164655456255,
+STORE, 140164655456256, 140164680630271,
+STORE, 140164630274048, 140164638666751,
+SNULL, 140164814852095, 140164848418815,
+STORE, 140164814848000, 140164814852095,
+STORE, 140164814852096, 140164848418815,
+SNULL, 140164814852096, 140164831633407,
+STORE, 140164831633408, 140164848418815,
+STORE, 140164814852096, 140164831633407,
+SNULL, 140164831637503, 140164848418815,
+STORE, 140164831633408, 140164831637503,
+STORE, 140164831637504, 140164848418815,
+STORE, 140164621881344, 140164638666751,
+SNULL, 140164831637504, 140164840026111,
+STORE, 140164840026112, 140164848418815,
+STORE, 140164831637504, 140164840026111,
+SNULL, 140164840030207, 140164848418815,
+STORE, 140164840026112, 140164840030207,
+STORE, 140164840030208, 140164848418815,
+STORE, 140164613488640, 140164638666751,
+SNULL, 140164613492735, 140164638666751,
+STORE, 140164613488640, 140164613492735,
+STORE, 140164613492736, 140164638666751,
+STORE, 140164605095936, 140164613488639,
+SNULL, 140164605100031, 140164613488639,
+STORE, 140164605095936, 140164605100031,
+STORE, 140164605100032, 140164613488639,
+STORE, 140164596703232, 140164605095935,
+STORE, 140164588310528, 140164605095935,
+SNULL, 140164588314623, 140164605095935,
+STORE, 140164588310528, 140164588314623,
+STORE, 140164588314624, 140164605095935,
+STORE, 140164504481792, 140164512874495,
+STORE, 140164496089088, 140164512874495,
+SNULL, 140164496089088, 140164504481791,
+STORE, 140164504481792, 140164512874495,
+STORE, 140164496089088, 140164504481791,
+SNULL, 140164504485887, 140164512874495,
+STORE, 140164504481792, 140164504485887,
+STORE, 140164504485888, 140164512874495,
+SNULL, 140164613492736, 140164630274047,
+STORE, 140164630274048, 140164638666751,
+STORE, 140164613492736, 140164630274047,
+SNULL, 140164630278143, 140164638666751,
+STORE, 140164630274048, 140164630278143,
+STORE, 140164630278144, 140164638666751,
+STORE, 140164487696384, 140164504481791,
+STORE, 140164479303680, 140164504481791,
+SNULL, 140164814852096, 140164823240703,
+STORE, 140164823240704, 140164831633407,
+STORE, 140164814852096, 140164823240703,
+SNULL, 140164823244799, 140164831633407,
+STORE, 140164823240704, 140164823244799,
+STORE, 140164823244800, 140164831633407,
+STORE, 140164470910976, 140164504481791,
+SNULL, 140164470910976, 140164496089087,
+STORE, 140164496089088, 140164504481791,
+STORE, 140164470910976, 140164496089087,
+SNULL, 140164496093183, 140164504481791,
+STORE, 140164496089088, 140164496093183,
+STORE, 140164496093184, 140164504481791,
+SNULL, 140164655456256, 140164672237567,
+STORE, 140164672237568, 140164680630271,
+STORE, 140164655456256, 140164672237567,
+SNULL, 140164672241663, 140164680630271,
+STORE, 140164672237568, 140164672241663,
+STORE, 140164672241664, 140164680630271,
+STORE, 140164462518272, 140164496089087,
+STORE, 140164454125568, 140164496089087,
+SNULL, 140164655456256, 140164663844863,
+STORE, 140164663844864, 140164672237567,
+STORE, 140164655456256, 140164663844863,
+SNULL, 140164663848959, 140164672237567,
+STORE, 140164663844864, 140164663848959,
+STORE, 140164663848960, 140164672237567,
+STORE, 140164370264064, 140164378656767,
+STORE, 140164361871360, 140164378656767,
+STORE, 140164353478656, 140164378656767,
+STORE, 140164345085952, 140164378656767,
+SNULL, 140164345085952, 140164353478655,
+STORE, 140164353478656, 140164378656767,
+STORE, 140164345085952, 140164353478655,
+SNULL, 140164353482751, 140164378656767,
+STORE, 140164353478656, 140164353482751,
+STORE, 140164353482752, 140164378656767,
+SNULL, 140164454125568, 140164487696383,
+STORE, 140164487696384, 140164496089087,
+STORE, 140164454125568, 140164487696383,
+SNULL, 140164487700479, 140164496089087,
+STORE, 140164487696384, 140164487700479,
+STORE, 140164487700480, 140164496089087,
+STORE, 140164336693248, 140164353478655,
+SNULL, 140164336697343, 140164353478655,
+STORE, 140164336693248, 140164336697343,
+STORE, 140164336697344, 140164353478655,
+STORE, 140164328300544, 140164336693247,
+SNULL, 140164454125568, 140164479303679,
+STORE, 140164479303680, 140164487696383,
+STORE, 140164454125568, 140164479303679,
+SNULL, 140164479307775, 140164487696383,
+STORE, 140164479303680, 140164479307775,
+STORE, 140164479307776, 140164487696383,
+STORE, 140164319907840, 140164336693247,
+STORE, 140164236046336, 140164244439039,
+SNULL, 140164588314624, 140164596703231,
+STORE, 140164596703232, 140164605095935,
+STORE, 140164588314624, 140164596703231,
+SNULL, 140164596707327, 140164605095935,
+STORE, 140164596703232, 140164596707327,
+STORE, 140164596707328, 140164605095935,
+SNULL, 140164454125568, 140164462518271,
+STORE, 140164462518272, 140164479303679,
+STORE, 140164454125568, 140164462518271,
+SNULL, 140164462522367, 140164479303679,
+STORE, 140164462518272, 140164462522367,
+STORE, 140164462522368, 140164479303679,
+STORE, 140164227653632, 140164244439039,
+SNULL, 140164227657727, 140164244439039,
+STORE, 140164227653632, 140164227657727,
+STORE, 140164227657728, 140164244439039,
+SNULL, 140164462522368, 140164470910975,
+STORE, 140164470910976, 140164479303679,
+STORE, 140164462522368, 140164470910975,
+SNULL, 140164470915071, 140164479303679,
+STORE, 140164470910976, 140164470915071,
+STORE, 140164470915072, 140164479303679,
+SNULL, 140164613492736, 140164621881343,
+STORE, 140164621881344, 140164630274047,
+STORE, 140164613492736, 140164621881343,
+SNULL, 140164621885439, 140164630274047,
+STORE, 140164621881344, 140164621885439,
+STORE, 140164621885440, 140164630274047,
+SNULL, 140164353482752, 140164370264063,
+STORE, 140164370264064, 140164378656767,
+STORE, 140164353482752, 140164370264063,
+SNULL, 140164370268159, 140164378656767,
+STORE, 140164370264064, 140164370268159,
+STORE, 140164370268160, 140164378656767,
+STORE, 140164219260928, 140164227653631,
+SNULL, 140164319911935, 140164336693247,
+STORE, 140164319907840, 140164319911935,
+STORE, 140164319911936, 140164336693247,
+SNULL, 140164336697344, 140164345085951,
+STORE, 140164345085952, 140164353478655,
+STORE, 140164336697344, 140164345085951,
+SNULL, 140164345090047, 140164353478655,
+STORE, 140164345085952, 140164345090047,
+STORE, 140164345090048, 140164353478655,
+SNULL, 140164319911936, 140164328300543,
+STORE, 140164328300544, 140164336693247,
+STORE, 140164319911936, 140164328300543,
+SNULL, 140164328304639, 140164336693247,
+STORE, 140164328300544, 140164328304639,
+STORE, 140164328304640, 140164336693247,
+SNULL, 140164454129663, 140164462518271,
+STORE, 140164454125568, 140164454129663,
+STORE, 140164454129664, 140164462518271,
+STORE, 140164210868224, 140164227653631,
+STORE, 140164202475520, 140164227653631,
+STORE, 140164194082816, 140164227653631,
+SNULL, 140164194086911, 140164227653631,
+STORE, 140164194082816, 140164194086911,
+STORE, 140164194086912, 140164227653631,
+SNULL, 140164353482752, 140164361871359,
+STORE, 140164361871360, 140164370264063,
+STORE, 140164353482752, 140164361871359,
+SNULL, 140164361875455, 140164370264063,
+STORE, 140164361871360, 140164361875455,
+STORE, 140164361875456, 140164370264063,
+SNULL, 140164227657728, 140164236046335,
+STORE, 140164236046336, 140164244439039,
+STORE, 140164227657728, 140164236046335,
+SNULL, 140164236050431, 140164244439039,
+STORE, 140164236046336, 140164236050431,
+STORE, 140164236050432, 140164244439039,
+STORE, 140164185690112, 140164194082815,
+SNULL, 140164194086912, 140164219260927,
+STORE, 140164219260928, 140164227653631,
+STORE, 140164194086912, 140164219260927,
+SNULL, 140164219265023, 140164227653631,
+STORE, 140164219260928, 140164219265023,
+STORE, 140164219265024, 140164227653631,
+STORE, 140164101828608, 140164110221311,
+STORE, 140164093435904, 140164110221311,
+STORE, 140164085043200, 140164110221311,
+SNULL, 140164085047295, 140164110221311,
+STORE, 140164085043200, 140164085047295,
+STORE, 140164085047296, 140164110221311,
+STORE, 140164076650496, 140164085043199,
+SNULL, 140164185694207, 140164194082815,
+STORE, 140164185690112, 140164185694207,
+STORE, 140164185694208, 140164194082815,
+SNULL, 140164085047296, 140164101828607,
+STORE, 140164101828608, 140164110221311,
+STORE, 140164085047296, 140164101828607,
+SNULL, 140164101832703, 140164110221311,
+STORE, 140164101828608, 140164101832703,
+STORE, 140164101832704, 140164110221311,
+SNULL, 140164085047296, 140164093435903,
+STORE, 140164093435904, 140164101828607,
+STORE, 140164085047296, 140164093435903,
+SNULL, 140164093439999, 140164101828607,
+STORE, 140164093435904, 140164093439999,
+STORE, 140164093440000, 140164101828607,
+SNULL, 140164194086912, 140164202475519,
+STORE, 140164202475520, 140164219260927,
+STORE, 140164194086912, 140164202475519,
+SNULL, 140164202479615, 140164219260927,
+STORE, 140164202475520, 140164202479615,
+STORE, 140164202479616, 140164219260927,
+SNULL, 140164202479616, 140164210868223,
+STORE, 140164210868224, 140164219260927,
+STORE, 140164202479616, 140164210868223,
+SNULL, 140164210872319, 140164219260927,
+STORE, 140164210868224, 140164210872319,
+STORE, 140164210872320, 140164219260927,
+SNULL, 140164076654591, 140164085043199,
+STORE, 140164076650496, 140164076654591,
+STORE, 140164076654592, 140164085043199,
+STORE, 140164068257792, 140164076650495,
+SNULL, 140164068261887, 140164076650495,
+STORE, 140164068257792, 140164068261887,
+STORE, 140164068261888, 140164076650495,
+STORE, 140165753053184, 140165753081855,
+STORE, 140165725851648, 140165728043007,
+SNULL, 140165725851648, 140165725941759,
+STORE, 140165725941760, 140165728043007,
+STORE, 140165725851648, 140165725941759,
+SNULL, 140165728034815, 140165728043007,
+STORE, 140165725941760, 140165728034815,
+STORE, 140165728034816, 140165728043007,
+ERASE, 140165728034816, 140165728043007,
+STORE, 140165728034816, 140165728043007,
+SNULL, 140165728038911, 140165728043007,
+STORE, 140165728034816, 140165728038911,
+STORE, 140165728038912, 140165728043007,
+ERASE, 140165753053184, 140165753081855,
+ERASE, 140164638666752, 140164638670847,
+ERASE, 140164638670848, 140164647059455,
+ERASE, 140165091676160, 140165091680255,
+ERASE, 140165091680256, 140165100068863,
+ERASE, 140164613488640, 140164613492735,
+ERASE, 140164613492736, 140164621881343,
+ERASE, 140164319907840, 140164319911935,
+ERASE, 140164319911936, 140164328300543,
+ERASE, 140165620154368, 140165620158463,
+ERASE, 140165620158464, 140165628547071,
+ERASE, 140164798062592, 140164798066687,
+ERASE, 140164798066688, 140164806455295,
+ERASE, 140164789669888, 140164789673983,
+ERASE, 140164789673984, 140164798062591,
+ERASE, 140164965851136, 140164965855231,
+ERASE, 140164965855232, 140164974243839,
+ERASE, 140165074890752, 140165074894847,
+ERASE, 140165074894848, 140165083283455,
+ERASE, 140164672237568, 140164672241663,
+ERASE, 140164672241664, 140164680630271,
+ERASE, 140164454125568, 140164454129663,
+ERASE, 140164454129664, 140164462518271,
+ERASE, 140165200715776, 140165200719871,
+ERASE, 140165200719872, 140165209108479,
+ERASE, 140164932280320, 140164932284415,
+ERASE, 140164932284416, 140164940673023,
+ERASE, 140164663844864, 140164663848959,
+ERASE, 140164663848960, 140164672237567,
+ERASE, 140164697415680, 140164697419775,
+ERASE, 140164697419776, 140164705808383,
+ERASE, 140164831633408, 140164831637503,
+ERASE, 140164831637504, 140164840026111,
+ERASE, 140165192323072, 140165192327167,
+ERASE, 140165192327168, 140165200715775,
+ERASE, 140165108461568, 140165108465663,
+ERASE, 140165108465664, 140165116854271,
+ERASE, 140164840026112, 140164840030207,
+ERASE, 140164840030208, 140164848418815,
+ERASE, 140164647059456, 140164647063551,
+ERASE, 140164647063552, 140164655452159,
+ERASE, 140165083283456, 140165083287551,
+ERASE, 140165083287552, 140165091676159,
+ERASE, 140164923887616, 140164923891711,
+ERASE, 140164923891712, 140164932280319,
+ERASE, 140164823240704, 140164823244799,
+ERASE, 140164823244800, 140164831633407,
+ERASE, 140164227653632, 140164227657727,
+ERASE, 140164227657728, 140164236046335,
+ERASE, 140164957458432, 140164957462527,
+ERASE, 140164957462528, 140164965851135,
+ERASE, 140164680630272, 140164680634367,
+ERASE, 140164680634368, 140164689022975,
+ERASE, 140164974243840, 140164974247935,
+ERASE, 140164974247936, 140164982636543,
+ERASE, 140165066498048, 140165066502143,
+ERASE, 140165066502144, 140165074890751,
+ERASE, 140164621881344, 140164621885439,
+ERASE, 140164621885440, 140164630274047,
+ERASE, 140164949065728, 140164949069823,
+ERASE, 140164949069824, 140164957458431,
+ERASE, 140164588310528, 140164588314623,
+ERASE, 140164588314624, 140164596703231,
+ERASE, 140164806455296, 140164806459391,
+ERASE, 140164806459392, 140164814847999,
+ERASE, 140164940673024, 140164940677119,
+ERASE, 140164940677120, 140164949065727,
+ERASE, 140164596703232, 140164596707327,
+ERASE, 140164596707328, 140164605095935,
+ERASE, 140164605095936, 140164605100031,
+ERASE, 140164605100032, 140164613488639,
+ERASE, 140164655452160, 140164655456255,
+ERASE, 140164655456256, 140164663844863,
+ERASE, 140164705808384, 140164705812479,
+ERASE, 140164705812480, 140164714201087,
+ERASE, 140164689022976, 140164689027071,
+ERASE, 140164689027072, 140164697415679,
+ERASE, 140164630274048, 140164630278143,
+ERASE, 140164630278144, 140164638666751,
+ERASE, 140164479303680, 140164479307775,
+ERASE, 140164479307776, 140164487696383,
+ERASE, 140164236046336, 140164236050431,
+ERASE, 140164236050432, 140164244439039,
+ERASE, 140164085043200, 140164085047295,
+ERASE, 140164085047296, 140164093435903,
+ERASE, 140164345085952, 140164345090047,
+ERASE, 140164345090048, 140164353478655,
+ERASE, 140164101828608, 140164101832703,
+ERASE, 140164101832704, 140164110221311,
+ERASE, 140164370264064, 140164370268159,
+ERASE, 140164370268160, 140164378656767,
+ERASE, 140164336693248, 140164336697343,
+ERASE, 140164336697344, 140164345085951,
+ERASE, 140164194082816, 140164194086911,
+ERASE, 140164194086912, 140164202475519,
+ERASE, 140164353478656, 140164353482751,
+ERASE, 140164353482752, 140164361871359,
+ERASE, 140164210868224, 140164210872319,
+ERASE, 140164210872320, 140164219260927,
+ERASE, 140164814848000, 140164814852095,
+ERASE, 140164814852096, 140164823240703,
+ERASE, 140164504481792, 140164504485887,
+ERASE, 140164504485888, 140164512874495,
+ERASE, 140165100068864, 140165100072959,
+ERASE, 140165100072960, 140165108461567,
+ERASE, 140164361871360, 140164361875455,
+ERASE, 140164361875456, 140164370264063,
+ERASE, 140164470910976, 140164470915071,
+ERASE, 140164470915072, 140164479303679,
+ERASE, 140164076650496, 140164076654591,
+ERASE, 140164076654592, 140164085043199,
+ERASE, 140164202475520, 140164202479615,
+ERASE, 140164202479616, 140164210868223,
+ERASE, 140164462518272, 140164462522367,
+ERASE, 140164462522368, 140164470910975,
+ERASE, 140165351718912, 140165351723007,
+ERASE, 140165351723008, 140165360111615,
+ERASE, 140164328300544, 140164328304639,
+ERASE, 140164328304640, 140164336693247,
+ERASE, 140164093435904, 140164093439999,
+ERASE, 140164093440000, 140164101828607,
+ERASE, 140165603368960, 140165603373055,
+ERASE, 140165603373056, 140165611761663,
+ERASE, 140165368504320, 140165368508415,
+ERASE, 140165368508416, 140165376897023,
+ERASE, 140165334933504, 140165334937599,
+ERASE, 140165334937600, 140165343326207,
+ERASE, 140165594976256, 140165594980351,
+ERASE, 140165594980352, 140165603368959,
+ERASE, 140164487696384, 140164487700479,
+ERASE, 140164487700480, 140164496089087,
+ERASE, 140164219260928, 140164219265023,
+ERASE, 140164219265024, 140164227653631,
+ERASE, 140164185690112, 140164185694207,
+ERASE, 140164185694208, 140164194082815,
+ERASE, 140164068257792, 140164068261887,
+ERASE, 140164068261888, 140164076650495,
+ERASE, 140165225893888, 140165225897983,
+ERASE, 140165225897984, 140165234286591,
+ERASE, 140165058105344, 140165058109439,
+ };
+ unsigned long set31[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140730890784768, 140737488351231,
+SNULL, 140730890788863, 140737488351231,
+STORE, 140730890784768, 140730890788863,
+STORE, 140730890653696, 140730890788863,
+STORE, 94577123659776, 94577125912575,
+SNULL, 94577123790847, 94577125912575,
+STORE, 94577123659776, 94577123790847,
+STORE, 94577123790848, 94577125912575,
+ERASE, 94577123790848, 94577125912575,
+STORE, 94577125883904, 94577125892095,
+STORE, 94577125892096, 94577125912575,
+STORE, 140624060407808, 140624062660607,
+SNULL, 140624060551167, 140624062660607,
+STORE, 140624060407808, 140624060551167,
+STORE, 140624060551168, 140624062660607,
+ERASE, 140624060551168, 140624062660607,
+STORE, 140624062648320, 140624062656511,
+STORE, 140624062656512, 140624062660607,
+STORE, 140730892140544, 140730892144639,
+STORE, 140730892128256, 140730892140543,
+STORE, 140624062619648, 140624062648319,
+STORE, 140624062611456, 140624062619647,
+STORE, 140624058191872, 140624060407807,
+SNULL, 140624058191872, 140624058290175,
+STORE, 140624058290176, 140624060407807,
+STORE, 140624058191872, 140624058290175,
+SNULL, 140624060383231, 140624060407807,
+STORE, 140624058290176, 140624060383231,
+STORE, 140624060383232, 140624060407807,
+SNULL, 140624060383232, 140624060391423,
+STORE, 140624060391424, 140624060407807,
+STORE, 140624060383232, 140624060391423,
+ERASE, 140624060383232, 140624060391423,
+STORE, 140624060383232, 140624060391423,
+ERASE, 140624060391424, 140624060407807,
+STORE, 140624060391424, 140624060407807,
+STORE, 140624054394880, 140624058191871,
+SNULL, 140624054394880, 140624056053759,
+STORE, 140624056053760, 140624058191871,
+STORE, 140624054394880, 140624056053759,
+SNULL, 140624058150911, 140624058191871,
+STORE, 140624056053760, 140624058150911,
+STORE, 140624058150912, 140624058191871,
+SNULL, 140624058150912, 140624058175487,
+STORE, 140624058175488, 140624058191871,
+STORE, 140624058150912, 140624058175487,
+ERASE, 140624058150912, 140624058175487,
+STORE, 140624058150912, 140624058175487,
+ERASE, 140624058175488, 140624058191871,
+STORE, 140624058175488, 140624058191871,
+STORE, 140624062603264, 140624062619647,
+SNULL, 140624058167295, 140624058175487,
+STORE, 140624058150912, 140624058167295,
+STORE, 140624058167296, 140624058175487,
+SNULL, 140624060387327, 140624060391423,
+STORE, 140624060383232, 140624060387327,
+STORE, 140624060387328, 140624060391423,
+SNULL, 94577125887999, 94577125892095,
+STORE, 94577125883904, 94577125887999,
+STORE, 94577125888000, 94577125892095,
+SNULL, 140624062652415, 140624062656511,
+STORE, 140624062648320, 140624062652415,
+STORE, 140624062652416, 140624062656511,
+ERASE, 140624062619648, 140624062648319,
+STORE, 94577157709824, 94577157844991,
+STORE, 140624046002176, 140624054394879,
+SNULL, 140624046006271, 140624054394879,
+STORE, 140624046002176, 140624046006271,
+STORE, 140624046006272, 140624054394879,
+STORE, 140624037609472, 140624046002175,
+STORE, 140623903391744, 140624037609471,
+SNULL, 140623903391744, 140623940157439,
+STORE, 140623940157440, 140624037609471,
+STORE, 140623903391744, 140623940157439,
+ERASE, 140623903391744, 140623940157439,
+SNULL, 140624007266303, 140624037609471,
+STORE, 140623940157440, 140624007266303,
+STORE, 140624007266304, 140624037609471,
+ERASE, 140624007266304, 140624037609471,
+SNULL, 140623940292607, 140624007266303,
+STORE, 140623940157440, 140623940292607,
+STORE, 140623940292608, 140624007266303,
+SNULL, 140624037613567, 140624046002175,
+STORE, 140624037609472, 140624037613567,
+STORE, 140624037613568, 140624046002175,
+STORE, 140624029216768, 140624037609471,
+SNULL, 140624029220863, 140624037609471,
+STORE, 140624029216768, 140624029220863,
+STORE, 140624029220864, 140624037609471,
+STORE, 140624020824064, 140624029216767,
+SNULL, 140624020828159, 140624029216767,
+STORE, 140624020824064, 140624020828159,
+STORE, 140624020828160, 140624029216767,
+STORE, 140624012431360, 140624020824063,
+SNULL, 140624012435455, 140624020824063,
+STORE, 140624012431360, 140624012435455,
+STORE, 140624012435456, 140624020824063,
+STORE, 140623931764736, 140623940157439,
+STORE, 140623797547008, 140623931764735,
+SNULL, 140623797547008, 140623805939711,
+STORE, 140623805939712, 140623931764735,
+STORE, 140623797547008, 140623805939711,
+ERASE, 140623797547008, 140623805939711,
+SNULL, 140623873048575, 140623931764735,
+STORE, 140623805939712, 140623873048575,
+STORE, 140623873048576, 140623931764735,
+ERASE, 140623873048576, 140623931764735,
+STORE, 140623923372032, 140623940157439,
+STORE, 140623914979328, 140623940157439,
+STORE, 140623906586624, 140623940157439,
+STORE, 140623671721984, 140623873048575,
+SNULL, 140623738830847, 140623873048575,
+STORE, 140623671721984, 140623738830847,
+STORE, 140623738830848, 140623873048575,
+SNULL, 140623738830848, 140623805939711,
+STORE, 140623805939712, 140623873048575,
+STORE, 140623738830848, 140623805939711,
+ERASE, 140623738830848, 140623805939711,
+SNULL, 140623806074879, 140623873048575,
+STORE, 140623805939712, 140623806074879,
+STORE, 140623806074880, 140623873048575,
+SNULL, 140623906586624, 140623931764735,
+STORE, 140623931764736, 140623940157439,
+STORE, 140623906586624, 140623931764735,
+SNULL, 140623931768831, 140623940157439,
+STORE, 140623931764736, 140623931768831,
+STORE, 140623931768832, 140623940157439,
+STORE, 140623537504256, 140623738830847,
+SNULL, 140623537504256, 140623671721983,
+STORE, 140623671721984, 140623738830847,
+STORE, 140623537504256, 140623671721983,
+SNULL, 140623671857151, 140623738830847,
+STORE, 140623671721984, 140623671857151,
+STORE, 140623671857152, 140623738830847,
+SNULL, 140623604613119, 140623671721983,
+STORE, 140623537504256, 140623604613119,
+STORE, 140623604613120, 140623671721983,
+ERASE, 140623604613120, 140623671721983,
+SNULL, 140623537639423, 140623604613119,
+STORE, 140623537504256, 140623537639423,
+STORE, 140623537639424, 140623604613119,
+STORE, 140623537639424, 140623671721983,
+SNULL, 140623537639424, 140623604613119,
+STORE, 140623604613120, 140623671721983,
+STORE, 140623537639424, 140623604613119,
+SNULL, 140623604748287, 140623671721983,
+STORE, 140623604613120, 140623604748287,
+STORE, 140623604748288, 140623671721983,
+STORE, 140623898193920, 140623931764735,
+SNULL, 140623898193920, 140623923372031,
+STORE, 140623923372032, 140623931764735,
+STORE, 140623898193920, 140623923372031,
+SNULL, 140623923376127, 140623931764735,
+STORE, 140623923372032, 140623923376127,
+STORE, 140623923376128, 140623931764735,
+STORE, 140623889801216, 140623923372031,
+SNULL, 140623889801216, 140623898193919,
+STORE, 140623898193920, 140623923372031,
+STORE, 140623889801216, 140623898193919,
+SNULL, 140623898198015, 140623923372031,
+STORE, 140623898193920, 140623898198015,
+STORE, 140623898198016, 140623923372031,
+SNULL, 140623889805311, 140623898193919,
+STORE, 140623889801216, 140623889805311,
+STORE, 140623889805312, 140623898193919,
+SNULL, 140623898198016, 140623906586623,
+STORE, 140623906586624, 140623923372031,
+STORE, 140623898198016, 140623906586623,
+SNULL, 140623906590719, 140623923372031,
+STORE, 140623906586624, 140623906590719,
+STORE, 140623906590720, 140623923372031,
+STORE, 140623881408512, 140623889801215,
+SNULL, 140623906590720, 140623914979327,
+STORE, 140623914979328, 140623923372031,
+STORE, 140623906590720, 140623914979327,
+SNULL, 140623914983423, 140623923372031,
+STORE, 140623914979328, 140623914983423,
+STORE, 140623914983424, 140623923372031,
+SNULL, 140623881412607, 140623889801215,
+STORE, 140623881408512, 140623881412607,
+STORE, 140623881412608, 140623889801215,
+STORE, 140623797547008, 140623805939711,
+STORE, 140623789154304, 140623805939711,
+STORE, 140623780761600, 140623805939711,
+SNULL, 140623780761600, 140623789154303,
+STORE, 140623789154304, 140623805939711,
+STORE, 140623780761600, 140623789154303,
+SNULL, 140623789158399, 140623805939711,
+STORE, 140623789154304, 140623789158399,
+STORE, 140623789158400, 140623805939711,
+STORE, 140623772368896, 140623789154303,
+STORE, 140623763976192, 140623789154303,
+SNULL, 140623763976192, 140623780761599,
+STORE, 140623780761600, 140623789154303,
+STORE, 140623763976192, 140623780761599,
+SNULL, 140623780765695, 140623789154303,
+STORE, 140623780761600, 140623780765695,
+STORE, 140623780765696, 140623789154303,
+SNULL, 140623789158400, 140623797547007,
+STORE, 140623797547008, 140623805939711,
+STORE, 140623789158400, 140623797547007,
+SNULL, 140623797551103, 140623805939711,
+STORE, 140623797547008, 140623797551103,
+STORE, 140623797551104, 140623805939711,
+SNULL, 140623763976192, 140623772368895,
+STORE, 140623772368896, 140623780761599,
+STORE, 140623763976192, 140623772368895,
+SNULL, 140623772372991, 140623780761599,
+STORE, 140623772368896, 140623772372991,
+STORE, 140623772372992, 140623780761599,
+SNULL, 140623763980287, 140623772368895,
+STORE, 140623763976192, 140623763980287,
+STORE, 140623763980288, 140623772368895,
+STORE, 140623755583488, 140623763976191,
+STORE, 140623747190784, 140623763976191,
+SNULL, 140623747190784, 140623755583487,
+STORE, 140623755583488, 140623763976191,
+STORE, 140623747190784, 140623755583487,
+SNULL, 140623755587583, 140623763976191,
+STORE, 140623755583488, 140623755587583,
+STORE, 140623755587584, 140623763976191,
+STORE, 140623529111552, 140623537504255,
+SNULL, 140623747194879, 140623755583487,
+STORE, 140623747190784, 140623747194879,
+STORE, 140623747194880, 140623755583487,
+SNULL, 140623529115647, 140623537504255,
+STORE, 140623529111552, 140623529115647,
+STORE, 140623529115648, 140623537504255,
+STORE, 140623520718848, 140623529111551,
+SNULL, 140623520722943, 140623529111551,
+STORE, 140623520718848, 140623520722943,
+STORE, 140623520722944, 140623529111551,
+STORE, 140623512326144, 140623520718847,
+STORE, 140623503933440, 140623520718847,
+STORE, 140623495540736, 140623520718847,
+STORE, 140623361323008, 140623495540735,
+STORE, 140623227105280, 140623495540735,
+STORE, 140623218712576, 140623227105279,
+STORE, 140623084494848, 140623218712575,
+STORE, 140623076102144, 140623084494847,
+STORE, 140622941884416, 140623076102143,
+SNULL, 140622941884416, 140623000633343,
+STORE, 140623000633344, 140623076102143,
+STORE, 140622941884416, 140623000633343,
+ERASE, 140622941884416, 140623000633343,
+STORE, 140622992240640, 140623000633343,
+STORE, 140622983847936, 140623000633343,
+STORE, 140622849630208, 140622983847935,
+STORE, 140622841237504, 140622849630207,
+SNULL, 140622849630208, 140622866415615,
+STORE, 140622866415616, 140622983847935,
+STORE, 140622849630208, 140622866415615,
+ERASE, 140622849630208, 140622866415615,
+STORE, 140622858022912, 140622866415615,
+SNULL, 140622933524479, 140622983847935,
+STORE, 140622866415616, 140622933524479,
+STORE, 140622933524480, 140622983847935,
+ERASE, 140622933524480, 140622983847935,
+STORE, 140622975455232, 140623000633343,
+STORE, 140622707019776, 140622841237503,
+STORE, 140622967062528, 140623000633343,
+STORE, 140622572802048, 140622841237503,
+STORE, 140622958669824, 140623000633343,
+STORE, 140622438584320, 140622841237503,
+STORE, 140622950277120, 140623000633343,
+SNULL, 140622858027007, 140622866415615,
+STORE, 140622858022912, 140622858027007,
+STORE, 140622858027008, 140622866415615,
+STORE, 140622941884416, 140623000633343,
+STORE, 140622841237504, 140622858022911,
+SNULL, 140622841237504, 140622849630207,
+STORE, 140622849630208, 140622858022911,
+STORE, 140622841237504, 140622849630207,
+SNULL, 140622849634303, 140622858022911,
+STORE, 140622849630208, 140622849634303,
+STORE, 140622849634304, 140622858022911,
+STORE, 140622430191616, 140622438584319,
+SNULL, 140622430195711, 140622438584319,
+STORE, 140622430191616, 140622430195711,
+STORE, 140622430195712, 140622438584319,
+SNULL, 140623361323007, 140623495540735,
+STORE, 140623227105280, 140623361323007,
+STORE, 140623361323008, 140623495540735,
+SNULL, 140623361323008, 140623403286527,
+STORE, 140623403286528, 140623495540735,
+STORE, 140623361323008, 140623403286527,
+ERASE, 140623361323008, 140623403286527,
+SNULL, 140623470395391, 140623495540735,
+STORE, 140623403286528, 140623470395391,
+STORE, 140623470395392, 140623495540735,
+ERASE, 140623470395392, 140623495540735,
+SNULL, 140623227105280, 140623269068799,
+STORE, 140623269068800, 140623361323007,
+STORE, 140623227105280, 140623269068799,
+ERASE, 140623227105280, 140623269068799,
+SNULL, 140623084494848, 140623134851071,
+STORE, 140623134851072, 140623218712575,
+STORE, 140623084494848, 140623134851071,
+ERASE, 140623084494848, 140623134851071,
+SNULL, 140623201959935, 140623218712575,
+STORE, 140623134851072, 140623201959935,
+STORE, 140623201959936, 140623218712575,
+ERASE, 140623201959936, 140623218712575,
+SNULL, 140623067742207, 140623076102143,
+STORE, 140623000633344, 140623067742207,
+STORE, 140623067742208, 140623076102143,
+ERASE, 140623067742208, 140623076102143,
+STORE, 140622295973888, 140622430191615,
+SNULL, 140622295973888, 140622329544703,
+STORE, 140622329544704, 140622430191615,
+STORE, 140622295973888, 140622329544703,
+ERASE, 140622295973888, 140622329544703,
+SNULL, 140622866550783, 140622933524479,
+STORE, 140622866415616, 140622866550783,
+STORE, 140622866550784, 140622933524479,
+SNULL, 140622707019775, 140622841237503,
+STORE, 140622438584320, 140622707019775,
+STORE, 140622707019776, 140622841237503,
+SNULL, 140622707019776, 140622732197887,
+STORE, 140622732197888, 140622841237503,
+STORE, 140622707019776, 140622732197887,
+ERASE, 140622707019776, 140622732197887,
+SNULL, 140622799306751, 140622841237503,
+STORE, 140622732197888, 140622799306751,
+STORE, 140622799306752, 140622841237503,
+ERASE, 140622799306752, 140622841237503,
+SNULL, 140622572802047, 140622707019775,
+STORE, 140622438584320, 140622572802047,
+STORE, 140622572802048, 140622707019775,
+SNULL, 140622572802048, 140622597980159,
+STORE, 140622597980160, 140622707019775,
+STORE, 140622572802048, 140622597980159,
+ERASE, 140622572802048, 140622597980159,
+SNULL, 140622438584320, 140622463762431,
+STORE, 140622463762432, 140622572802047,
+STORE, 140622438584320, 140622463762431,
+ERASE, 140622438584320, 140622463762431,
+SNULL, 140622530871295, 140622572802047,
+STORE, 140622463762432, 140622530871295,
+STORE, 140622530871296, 140622572802047,
+ERASE, 140622530871296, 140622572802047,
+STORE, 140622195326976, 140622430191615,
+SNULL, 140622262435839, 140622430191615,
+STORE, 140622195326976, 140622262435839,
+STORE, 140622262435840, 140622430191615,
+SNULL, 140622262435840, 140622329544703,
+STORE, 140622329544704, 140622430191615,
+STORE, 140622262435840, 140622329544703,
+ERASE, 140622262435840, 140622329544703,
+SNULL, 140622841241599, 140622849630207,
+STORE, 140622841237504, 140622841241599,
+STORE, 140622841241600, 140622849630207,
+STORE, 140623487148032, 140623520718847,
+STORE, 140623478755328, 140623520718847,
+SNULL, 140622941884416, 140622983847935,
+STORE, 140622983847936, 140623000633343,
+STORE, 140622941884416, 140622983847935,
+SNULL, 140622983852031, 140623000633343,
+STORE, 140622983847936, 140622983852031,
+STORE, 140622983852032, 140623000633343,
+STORE, 140623394893824, 140623403286527,
+SNULL, 140623394897919, 140623403286527,
+STORE, 140623394893824, 140623394897919,
+STORE, 140623394897920, 140623403286527,
+SNULL, 140623403421695, 140623470395391,
+STORE, 140623403286528, 140623403421695,
+STORE, 140623403421696, 140623470395391,
+SNULL, 140623478755328, 140623503933439,
+STORE, 140623503933440, 140623520718847,
+STORE, 140623478755328, 140623503933439,
+SNULL, 140623503937535, 140623520718847,
+STORE, 140623503933440, 140623503937535,
+STORE, 140623503937536, 140623520718847,
+SNULL, 140623336177663, 140623361323007,
+STORE, 140623269068800, 140623336177663,
+STORE, 140623336177664, 140623361323007,
+ERASE, 140623336177664, 140623361323007,
+SNULL, 140623269203967, 140623336177663,
+STORE, 140623269068800, 140623269203967,
+STORE, 140623269203968, 140623336177663,
+SNULL, 140623134986239, 140623201959935,
+STORE, 140623134851072, 140623134986239,
+STORE, 140623134986240, 140623201959935,
+SNULL, 140623000768511, 140623067742207,
+STORE, 140623000633344, 140623000768511,
+STORE, 140623000768512, 140623067742207,
+SNULL, 140622396653567, 140622430191615,
+STORE, 140622329544704, 140622396653567,
+STORE, 140622396653568, 140622430191615,
+ERASE, 140622396653568, 140622430191615,
+SNULL, 140622732333055, 140622799306751,
+STORE, 140622732197888, 140622732333055,
+STORE, 140622732333056, 140622799306751,
+SNULL, 140622941884416, 140622975455231,
+STORE, 140622975455232, 140622983847935,
+STORE, 140622941884416, 140622975455231,
+SNULL, 140622975459327, 140622983847935,
+STORE, 140622975455232, 140622975459327,
+STORE, 140622975459328, 140622983847935,
+SNULL, 140622665089023, 140622707019775,
+STORE, 140622597980160, 140622665089023,
+STORE, 140622665089024, 140622707019775,
+ERASE, 140622665089024, 140622707019775,
+SNULL, 140622598115327, 140622665089023,
+STORE, 140622597980160, 140622598115327,
+STORE, 140622598115328, 140622665089023,
+SNULL, 140622463897599, 140622530871295,
+STORE, 140622463762432, 140622463897599,
+STORE, 140622463897600, 140622530871295,
+SNULL, 140622195462143, 140622262435839,
+STORE, 140622195326976, 140622195462143,
+STORE, 140622195462144, 140622262435839,
+STORE, 140623386501120, 140623394893823,
+SNULL, 140622941884416, 140622950277119,
+STORE, 140622950277120, 140622975455231,
+STORE, 140622941884416, 140622950277119,
+SNULL, 140622950281215, 140622975455231,
+STORE, 140622950277120, 140622950281215,
+STORE, 140622950281216, 140622975455231,
+SNULL, 140622941888511, 140622950277119,
+STORE, 140622941884416, 140622941888511,
+STORE, 140622941888512, 140622950277119,
+STORE, 140623378108416, 140623394893823,
+SNULL, 140623478755328, 140623495540735,
+STORE, 140623495540736, 140623503933439,
+STORE, 140623478755328, 140623495540735,
+SNULL, 140623495544831, 140623503933439,
+STORE, 140623495540736, 140623495544831,
+STORE, 140623495544832, 140623503933439,
+SNULL, 140623478755328, 140623487148031,
+STORE, 140623487148032, 140623495540735,
+STORE, 140623478755328, 140623487148031,
+SNULL, 140623487152127, 140623495540735,
+STORE, 140623487148032, 140623487152127,
+STORE, 140623487152128, 140623495540735,
+SNULL, 140623218716671, 140623227105279,
+STORE, 140623218712576, 140623218716671,
+STORE, 140623218716672, 140623227105279,
+SNULL, 140623076106239, 140623084494847,
+STORE, 140623076102144, 140623076106239,
+STORE, 140623076106240, 140623084494847,
+SNULL, 140622329679871, 140622396653567,
+STORE, 140622329544704, 140622329679871,
+STORE, 140622329679872, 140622396653567,
+SNULL, 140622950281216, 140622958669823,
+STORE, 140622958669824, 140622975455231,
+STORE, 140622950281216, 140622958669823,
+SNULL, 140622958673919, 140622975455231,
+STORE, 140622958669824, 140622958673919,
+STORE, 140622958673920, 140622975455231,
+SNULL, 140623503937536, 140623512326143,
+STORE, 140623512326144, 140623520718847,
+STORE, 140623503937536, 140623512326143,
+SNULL, 140623512330239, 140623520718847,
+STORE, 140623512326144, 140623512330239,
+STORE, 140623512330240, 140623520718847,
+SNULL, 140623378108416, 140623386501119,
+STORE, 140623386501120, 140623394893823,
+STORE, 140623378108416, 140623386501119,
+SNULL, 140623386505215, 140623394893823,
+STORE, 140623386501120, 140623386505215,
+STORE, 140623386505216, 140623394893823,
+STORE, 140623369715712, 140623386501119,
+STORE, 140623361323008, 140623386501119,
+STORE, 140623352930304, 140623386501119,
+SNULL, 140623352930304, 140623361323007,
+STORE, 140623361323008, 140623386501119,
+STORE, 140623352930304, 140623361323007,
+SNULL, 140623361327103, 140623386501119,
+STORE, 140623361323008, 140623361327103,
+STORE, 140623361327104, 140623386501119,
+SNULL, 140623478759423, 140623487148031,
+STORE, 140623478755328, 140623478759423,
+STORE, 140623478759424, 140623487148031,
+STORE, 140623344537600, 140623361323007,
+STORE, 140623260676096, 140623269068799,
+SNULL, 140622958673920, 140622967062527,
+STORE, 140622967062528, 140622975455231,
+STORE, 140622958673920, 140622967062527,
+SNULL, 140622967066623, 140622975455231,
+STORE, 140622967062528, 140622967066623,
+STORE, 140622967066624, 140622975455231,
+STORE, 140623252283392, 140623269068799,
+STORE, 140623243890688, 140623269068799,
+SNULL, 140622983852032, 140622992240639,
+STORE, 140622992240640, 140623000633343,
+STORE, 140622983852032, 140622992240639,
+SNULL, 140622992244735, 140623000633343,
+STORE, 140622992240640, 140622992244735,
+STORE, 140622992244736, 140623000633343,
+STORE, 140623235497984, 140623269068799,
+STORE, 140623218716672, 140623235497983,
+STORE, 140623210319872, 140623218712575,
+STORE, 140623126458368, 140623134851071,
+SNULL, 140623210323967, 140623218712575,
+STORE, 140623210319872, 140623210323967,
+STORE, 140623210323968, 140623218712575,
+SNULL, 140623218716672, 140623227105279,
+STORE, 140623227105280, 140623235497983,
+STORE, 140623218716672, 140623227105279,
+SNULL, 140623227109375, 140623235497983,
+STORE, 140623227105280, 140623227109375,
+STORE, 140623227109376, 140623235497983,
+STORE, 140623118065664, 140623134851071,
+STORE, 140623109672960, 140623134851071,
+SNULL, 140623109677055, 140623134851071,
+STORE, 140623109672960, 140623109677055,
+STORE, 140623109677056, 140623134851071,
+STORE, 140623101280256, 140623109672959,
+STORE, 140623092887552, 140623109672959,
+SNULL, 140623092887552, 140623101280255,
+STORE, 140623101280256, 140623109672959,
+STORE, 140623092887552, 140623101280255,
+SNULL, 140623101284351, 140623109672959,
+STORE, 140623101280256, 140623101284351,
+STORE, 140623101284352, 140623109672959,
+SNULL, 140623361327104, 140623378108415,
+STORE, 140623378108416, 140623386501119,
+STORE, 140623361327104, 140623378108415,
+SNULL, 140623378112511, 140623386501119,
+STORE, 140623378108416, 140623378112511,
+STORE, 140623378112512, 140623386501119,
+SNULL, 140623235497984, 140623243890687,
+STORE, 140623243890688, 140623269068799,
+STORE, 140623235497984, 140623243890687,
+SNULL, 140623243894783, 140623269068799,
+STORE, 140623243890688, 140623243894783,
+STORE, 140623243894784, 140623269068799,
+SNULL, 140623361327104, 140623369715711,
+STORE, 140623369715712, 140623378108415,
+STORE, 140623361327104, 140623369715711,
+SNULL, 140623369719807, 140623378108415,
+STORE, 140623369715712, 140623369719807,
+STORE, 140623369719808, 140623378108415,
+SNULL, 140623243894784, 140623252283391,
+STORE, 140623252283392, 140623269068799,
+STORE, 140623243894784, 140623252283391,
+SNULL, 140623252287487, 140623269068799,
+STORE, 140623252283392, 140623252287487,
+STORE, 140623252287488, 140623269068799,
+SNULL, 140623235502079, 140623243890687,
+STORE, 140623235497984, 140623235502079,
+STORE, 140623235502080, 140623243890687,
+SNULL, 140623344541695, 140623361323007,
+STORE, 140623344537600, 140623344541695,
+STORE, 140623344541696, 140623361323007,
+STORE, 140623076106240, 140623092887551,
+SNULL, 140623076106240, 140623084494847,
+STORE, 140623084494848, 140623092887551,
+STORE, 140623076106240, 140623084494847,
+SNULL, 140623084498943, 140623092887551,
+STORE, 140623084494848, 140623084498943,
+STORE, 140623084498944, 140623092887551,
+SNULL, 140623344541696, 140623352930303,
+STORE, 140623352930304, 140623361323007,
+STORE, 140623344541696, 140623352930303,
+SNULL, 140623352934399, 140623361323007,
+STORE, 140623352930304, 140623352934399,
+STORE, 140623352934400, 140623361323007,
+SNULL, 140623109677056, 140623118065663,
+STORE, 140623118065664, 140623134851071,
+STORE, 140623109677056, 140623118065663,
+SNULL, 140623118069759, 140623134851071,
+STORE, 140623118065664, 140623118069759,
+STORE, 140623118069760, 140623134851071,
+STORE, 140622832844800, 140622841237503,
+STORE, 140622824452096, 140622841237503,
+SNULL, 140622824452096, 140622832844799,
+STORE, 140622832844800, 140622841237503,
+STORE, 140622824452096, 140622832844799,
+SNULL, 140622832848895, 140622841237503,
+STORE, 140622832844800, 140622832848895,
+STORE, 140622832848896, 140622841237503,
+STORE, 140622816059392, 140622832844799,
+SNULL, 140623092891647, 140623101280255,
+STORE, 140623092887552, 140623092891647,
+STORE, 140623092891648, 140623101280255,
+SNULL, 140623118069760, 140623126458367,
+STORE, 140623126458368, 140623134851071,
+STORE, 140623118069760, 140623126458367,
+SNULL, 140623126462463, 140623134851071,
+STORE, 140623126458368, 140623126462463,
+STORE, 140623126462464, 140623134851071,
+SNULL, 140623252287488, 140623260676095,
+STORE, 140623260676096, 140623269068799,
+STORE, 140623252287488, 140623260676095,
+SNULL, 140623260680191, 140623269068799,
+STORE, 140623260676096, 140623260680191,
+STORE, 140623260680192, 140623269068799,
+STORE, 140622807666688, 140622832844799,
+STORE, 140622723805184, 140622732197887,
+STORE, 140622715412480, 140622732197887,
+STORE, 140622707019776, 140622732197887,
+SNULL, 140622707023871, 140622732197887,
+STORE, 140622707019776, 140622707023871,
+STORE, 140622707023872, 140622732197887,
+STORE, 140622698627072, 140622707019775,
+STORE, 140622690234368, 140622707019775,
+SNULL, 140622690238463, 140622707019775,
+STORE, 140622690234368, 140622690238463,
+STORE, 140622690238464, 140622707019775,
+SNULL, 140622807666688, 140622816059391,
+STORE, 140622816059392, 140622832844799,
+STORE, 140622807666688, 140622816059391,
+SNULL, 140622816063487, 140622832844799,
+STORE, 140622816059392, 140622816063487,
+STORE, 140622816063488, 140622832844799,
+STORE, 140622681841664, 140622690234367,
+STORE, 140622673448960, 140622690234367,
+SNULL, 140622673453055, 140622690234367,
+STORE, 140622673448960, 140622673453055,
+STORE, 140622673453056, 140622690234367,
+STORE, 140622589587456, 140622597980159,
+SNULL, 140622807670783, 140622816059391,
+STORE, 140622807666688, 140622807670783,
+STORE, 140622807670784, 140622816059391,
+STORE, 140622581194752, 140622597980159,
+SNULL, 140622581198847, 140622597980159,
+STORE, 140622581194752, 140622581198847,
+STORE, 140622581198848, 140622597980159,
+SNULL, 140622816063488, 140622824452095,
+STORE, 140622824452096, 140622832844799,
+STORE, 140622816063488, 140622824452095,
+SNULL, 140622824456191, 140622832844799,
+STORE, 140622824452096, 140622824456191,
+STORE, 140622824456192, 140622832844799,
+STORE, 140622572802048, 140622581194751,
+SNULL, 140622572806143, 140622581194751,
+STORE, 140622572802048, 140622572806143,
+STORE, 140622572806144, 140622581194751,
+STORE, 140622564409344, 140622572802047,
+STORE, 140622556016640, 140622572802047,
+SNULL, 140622556016640, 140622564409343,
+STORE, 140622564409344, 140622572802047,
+STORE, 140622556016640, 140622564409343,
+SNULL, 140622564413439, 140622572802047,
+STORE, 140622564409344, 140622564413439,
+STORE, 140622564413440, 140622572802047,
+SNULL, 140622690238464, 140622698627071,
+STORE, 140622698627072, 140622707019775,
+STORE, 140622690238464, 140622698627071,
+SNULL, 140622698631167, 140622707019775,
+STORE, 140622698627072, 140622698631167,
+STORE, 140622698631168, 140622707019775,
+SNULL, 140622707023872, 140622723805183,
+STORE, 140622723805184, 140622732197887,
+STORE, 140622707023872, 140622723805183,
+SNULL, 140622723809279, 140622732197887,
+STORE, 140622723805184, 140622723809279,
+STORE, 140622723809280, 140622732197887,
+SNULL, 140622707023872, 140622715412479,
+STORE, 140622715412480, 140622723805183,
+STORE, 140622707023872, 140622715412479,
+SNULL, 140622715416575, 140622723805183,
+STORE, 140622715412480, 140622715416575,
+STORE, 140622715416576, 140622723805183,
+STORE, 140622547623936, 140622564409343,
+SNULL, 140622547628031, 140622564409343,
+STORE, 140622547623936, 140622547628031,
+STORE, 140622547628032, 140622564409343,
+STORE, 140622539231232, 140622547623935,
+SNULL, 140622539235327, 140622547623935,
+STORE, 140622539231232, 140622539235327,
+STORE, 140622539235328, 140622547623935,
+SNULL, 140622581198848, 140622589587455,
+STORE, 140622589587456, 140622597980159,
+STORE, 140622581198848, 140622589587455,
+SNULL, 140622589591551, 140622597980159,
+STORE, 140622589587456, 140622589591551,
+STORE, 140622589591552, 140622597980159,
+STORE, 140622455369728, 140622463762431,
+SNULL, 140622455373823, 140622463762431,
+STORE, 140622455369728, 140622455373823,
+STORE, 140622455373824, 140622463762431,
+STORE, 140622446977024, 140622455369727,
+SNULL, 140622446981119, 140622455369727,
+STORE, 140622446977024, 140622446981119,
+STORE, 140622446981120, 140622455369727,
+SNULL, 140622547628032, 140622556016639,
+STORE, 140622556016640, 140622564409343,
+STORE, 140622547628032, 140622556016639,
+SNULL, 140622556020735, 140622564409343,
+STORE, 140622556016640, 140622556020735,
+STORE, 140622556020736, 140622564409343,
+STORE, 140622430195712, 140622446977023,
+STORE, 140622421798912, 140622430191615,
+SNULL, 140622430195712, 140622438584319,
+STORE, 140622438584320, 140622446977023,
+STORE, 140622430195712, 140622438584319,
+SNULL, 140622438588415, 140622446977023,
+STORE, 140622438584320, 140622438588415,
+STORE, 140622438588416, 140622446977023,
+STORE, 140622413406208, 140622430191615,
+STORE, 140622405013504, 140622430191615,
+SNULL, 140622405013504, 140622413406207,
+STORE, 140622413406208, 140622430191615,
+STORE, 140622405013504, 140622413406207,
+SNULL, 140622413410303, 140622430191615,
+STORE, 140622413406208, 140622413410303,
+STORE, 140622413410304, 140622430191615,
+SNULL, 140622673453056, 140622681841663,
+STORE, 140622681841664, 140622690234367,
+STORE, 140622673453056, 140622681841663,
+SNULL, 140622681845759, 140622690234367,
+STORE, 140622681841664, 140622681845759,
+STORE, 140622681845760, 140622690234367,
+STORE, 140622321152000, 140622329544703,
+SNULL, 140622413410304, 140622421798911,
+STORE, 140622421798912, 140622430191615,
+STORE, 140622413410304, 140622421798911,
+SNULL, 140622421803007, 140622430191615,
+STORE, 140622421798912, 140622421803007,
+STORE, 140622421803008, 140622430191615,
+STORE, 140622312759296, 140622329544703,
+SNULL, 140622312763391, 140622329544703,
+STORE, 140622312759296, 140622312763391,
+STORE, 140622312763392, 140622329544703,
+SNULL, 140622405017599, 140622413406207,
+STORE, 140622405013504, 140622405017599,
+STORE, 140622405017600, 140622413406207,
+STORE, 140622304366592, 140622312759295,
+SNULL, 140622304370687, 140622312759295,
+STORE, 140622304366592, 140622304370687,
+STORE, 140622304370688, 140622312759295,
+SNULL, 140622312763392, 140622321151999,
+STORE, 140622321152000, 140622329544703,
+STORE, 140622312763392, 140622321151999,
+SNULL, 140622321156095, 140622329544703,
+STORE, 140622321152000, 140622321156095,
+STORE, 140622321156096, 140622329544703,
+STORE, 140624062619648, 140624062648319,
+STORE, 140624010240000, 140624012431359,
+SNULL, 140624010240000, 140624010330111,
+STORE, 140624010330112, 140624012431359,
+STORE, 140624010240000, 140624010330111,
+SNULL, 140624012423167, 140624012431359,
+STORE, 140624010330112, 140624012423167,
+STORE, 140624012423168, 140624012431359,
+ERASE, 140624012423168, 140624012431359,
+STORE, 140624012423168, 140624012431359,
+SNULL, 140624012427263, 140624012431359,
+STORE, 140624012423168, 140624012427263,
+STORE, 140624012427264, 140624012431359,
+ERASE, 140624062619648, 140624062648319,
+ERASE, 140622849630208, 140622849634303,
+ERASE, 140622849634304, 140622858022911,
+ERASE, 140623394893824, 140623394897919,
+ERASE, 140623394897920, 140623403286527,
+ERASE, 140623361323008, 140623361327103,
+ERASE, 140623361327104, 140623369715711,
+ERASE, 140623084494848, 140623084498943,
+ERASE, 140623084498944, 140623092887551,
+ERASE, 140623931764736, 140623931768831,
+ERASE, 140623931768832, 140623940157439,
+ERASE, 140622841237504, 140622841241599,
+ERASE, 140622841241600, 140622849630207,
+ERASE, 140623487148032, 140623487152127,
+ERASE, 140623487152128, 140623495540735,
+ERASE, 140623109672960, 140623109677055,
+ERASE, 140623109677056, 140623118065663,
+ERASE, 140622983847936, 140622983852031,
+ERASE, 140622983852032, 140622992240639,
+ERASE, 140623352930304, 140623352934399,
+ERASE, 140623352934400, 140623361323007,
+ERASE, 140622564409344, 140622564413439,
+ERASE, 140622564413440, 140622572802047,
+ERASE, 140622430191616, 140622430195711,
+ERASE, 140622430195712, 140622438584319,
+ERASE, 140622958669824, 140622958673919,
+ERASE, 140622958673920, 140622967062527,
+ERASE, 140622992240640, 140622992244735,
+ERASE, 140622992244736, 140623000633343,
+ERASE, 140623227105280, 140623227109375,
+ERASE, 140623227109376, 140623235497983,
+ERASE, 140622321152000, 140622321156095,
+ERASE, 140622321156096, 140622329544703,
+ERASE, 140622858022912, 140622858027007,
+ERASE, 140622858027008, 140622866415615,
+ERASE, 140622975455232, 140622975459327,
+ERASE, 140622975459328, 140622983847935,
+ERASE, 140623378108416, 140623378112511,
+ERASE, 140623378112512, 140623386501119,
+ERASE, 140623495540736, 140623495544831,
+ERASE, 140623495544832, 140623503933439,
+ERASE, 140623118065664, 140623118069759,
+ERASE, 140623118069760, 140623126458367,
+ERASE, 140622572802048, 140622572806143,
+ERASE, 140622572806144, 140622581194751,
+ERASE, 140622421798912, 140622421803007,
+ERASE, 140622421803008, 140622430191615,
+ERASE, 140622967062528, 140622967066623,
+ERASE, 140622967066624, 140622975455231,
+ERASE, 140623252283392, 140623252287487,
+ERASE, 140623252287488, 140623260676095,
+ERASE, 140622673448960, 140622673453055,
+ERASE, 140622673453056, 140622681841663,
+ERASE, 140623076102144, 140623076106239,
+ERASE, 140623076106240, 140623084494847,
+ERASE, 140623101280256, 140623101284351,
+ERASE, 140623101284352, 140623109672959,
+ERASE, 140622715412480, 140622715416575,
+ERASE, 140622715416576, 140622723805183,
+ERASE, 140622405013504, 140622405017599,
+ERASE, 140622405017600, 140622413406207,
+ERASE, 140623478755328, 140623478759423,
+ERASE, 140623478759424, 140623487148031,
+ERASE, 140623906586624, 140623906590719,
+ERASE, 140623906590720, 140623914979327,
+ERASE, 140622950277120, 140622950281215,
+ERASE, 140622950281216, 140622958669823,
+ };
+ unsigned long set32[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140731244212224, 140737488351231,
+SNULL, 140731244216319, 140737488351231,
+STORE, 140731244212224, 140731244216319,
+STORE, 140731244081152, 140731244216319,
+STORE, 94427773984768, 94427776237567,
+SNULL, 94427774115839, 94427776237567,
+STORE, 94427773984768, 94427774115839,
+STORE, 94427774115840, 94427776237567,
+ERASE, 94427774115840, 94427776237567,
+STORE, 94427776208896, 94427776217087,
+STORE, 94427776217088, 94427776237567,
+STORE, 140401464893440, 140401467146239,
+SNULL, 140401465036799, 140401467146239,
+STORE, 140401464893440, 140401465036799,
+STORE, 140401465036800, 140401467146239,
+ERASE, 140401465036800, 140401467146239,
+STORE, 140401467133952, 140401467142143,
+STORE, 140401467142144, 140401467146239,
+STORE, 140731244507136, 140731244511231,
+STORE, 140731244494848, 140731244507135,
+STORE, 140401467105280, 140401467133951,
+STORE, 140401467097088, 140401467105279,
+STORE, 140401462677504, 140401464893439,
+SNULL, 140401462677504, 140401462775807,
+STORE, 140401462775808, 140401464893439,
+STORE, 140401462677504, 140401462775807,
+SNULL, 140401464868863, 140401464893439,
+STORE, 140401462775808, 140401464868863,
+STORE, 140401464868864, 140401464893439,
+SNULL, 140401464868864, 140401464877055,
+STORE, 140401464877056, 140401464893439,
+STORE, 140401464868864, 140401464877055,
+ERASE, 140401464868864, 140401464877055,
+STORE, 140401464868864, 140401464877055,
+ERASE, 140401464877056, 140401464893439,
+STORE, 140401464877056, 140401464893439,
+STORE, 140401458880512, 140401462677503,
+SNULL, 140401458880512, 140401460539391,
+STORE, 140401460539392, 140401462677503,
+STORE, 140401458880512, 140401460539391,
+SNULL, 140401462636543, 140401462677503,
+STORE, 140401460539392, 140401462636543,
+STORE, 140401462636544, 140401462677503,
+SNULL, 140401462636544, 140401462661119,
+STORE, 140401462661120, 140401462677503,
+STORE, 140401462636544, 140401462661119,
+ERASE, 140401462636544, 140401462661119,
+STORE, 140401462636544, 140401462661119,
+ERASE, 140401462661120, 140401462677503,
+STORE, 140401462661120, 140401462677503,
+STORE, 140401467088896, 140401467105279,
+SNULL, 140401462652927, 140401462661119,
+STORE, 140401462636544, 140401462652927,
+STORE, 140401462652928, 140401462661119,
+SNULL, 140401464872959, 140401464877055,
+STORE, 140401464868864, 140401464872959,
+STORE, 140401464872960, 140401464877055,
+SNULL, 94427776212991, 94427776217087,
+STORE, 94427776208896, 94427776212991,
+STORE, 94427776212992, 94427776217087,
+SNULL, 140401467138047, 140401467142143,
+STORE, 140401467133952, 140401467138047,
+STORE, 140401467138048, 140401467142143,
+ERASE, 140401467105280, 140401467133951,
+STORE, 94427784683520, 94427784818687,
+STORE, 140401450487808, 140401458880511,
+SNULL, 140401450491903, 140401458880511,
+STORE, 140401450487808, 140401450491903,
+STORE, 140401450491904, 140401458880511,
+STORE, 140401442095104, 140401450487807,
+STORE, 140401307877376, 140401442095103,
+SNULL, 140401307877376, 140401340055551,
+STORE, 140401340055552, 140401442095103,
+STORE, 140401307877376, 140401340055551,
+ERASE, 140401307877376, 140401340055551,
+SNULL, 140401407164415, 140401442095103,
+STORE, 140401340055552, 140401407164415,
+STORE, 140401407164416, 140401442095103,
+ERASE, 140401407164416, 140401442095103,
+SNULL, 140401340190719, 140401407164415,
+STORE, 140401340055552, 140401340190719,
+STORE, 140401340190720, 140401407164415,
+SNULL, 140401442099199, 140401450487807,
+STORE, 140401442095104, 140401442099199,
+STORE, 140401442099200, 140401450487807,
+STORE, 140401433702400, 140401442095103,
+SNULL, 140401433706495, 140401442095103,
+STORE, 140401433702400, 140401433706495,
+STORE, 140401433706496, 140401442095103,
+STORE, 140401425309696, 140401433702399,
+SNULL, 140401425313791, 140401433702399,
+STORE, 140401425309696, 140401425313791,
+STORE, 140401425313792, 140401433702399,
+STORE, 140401416916992, 140401425309695,
+SNULL, 140401416921087, 140401425309695,
+STORE, 140401416916992, 140401416921087,
+STORE, 140401416921088, 140401425309695,
+STORE, 140401408524288, 140401416916991,
+STORE, 140401205837824, 140401340055551,
+SNULL, 140401272946687, 140401340055551,
+STORE, 140401205837824, 140401272946687,
+STORE, 140401272946688, 140401340055551,
+ERASE, 140401272946688, 140401340055551,
+SNULL, 140401205972991, 140401272946687,
+STORE, 140401205837824, 140401205972991,
+STORE, 140401205972992, 140401272946687,
+STORE, 140401331662848, 140401340055551,
+STORE, 140401323270144, 140401340055551,
+STORE, 140401138728960, 140401205837823,
+STORE, 140401314877440, 140401340055551,
+SNULL, 140401408528383, 140401416916991,
+STORE, 140401408524288, 140401408528383,
+STORE, 140401408528384, 140401416916991,
+SNULL, 140401138864127, 140401205837823,
+STORE, 140401138728960, 140401138864127,
+STORE, 140401138864128, 140401205837823,
+STORE, 140401004511232, 140401138728959,
+SNULL, 140401071620095, 140401138728959,
+STORE, 140401004511232, 140401071620095,
+STORE, 140401071620096, 140401138728959,
+ERASE, 140401071620096, 140401138728959,
+STORE, 140400870293504, 140401071620095,
+SNULL, 140400937402367, 140401071620095,
+STORE, 140400870293504, 140400937402367,
+STORE, 140400937402368, 140401071620095,
+SNULL, 140400937402368, 140401004511231,
+STORE, 140401004511232, 140401071620095,
+STORE, 140400937402368, 140401004511231,
+ERASE, 140400937402368, 140401004511231,
+STORE, 140401306484736, 140401340055551,
+SNULL, 140401306484736, 140401323270143,
+STORE, 140401323270144, 140401340055551,
+STORE, 140401306484736, 140401323270143,
+SNULL, 140401323274239, 140401340055551,
+STORE, 140401323270144, 140401323274239,
+STORE, 140401323274240, 140401340055551,
+SNULL, 140401004646399, 140401071620095,
+STORE, 140401004511232, 140401004646399,
+STORE, 140401004646400, 140401071620095,
+SNULL, 140400870428671, 140400937402367,
+STORE, 140400870293504, 140400870428671,
+STORE, 140400870428672, 140400937402367,
+SNULL, 140401306488831, 140401323270143,
+STORE, 140401306484736, 140401306488831,
+STORE, 140401306488832, 140401323270143,
+STORE, 140401298092032, 140401306484735,
+SNULL, 140401306488832, 140401314877439,
+STORE, 140401314877440, 140401323270143,
+STORE, 140401306488832, 140401314877439,
+SNULL, 140401314881535, 140401323270143,
+STORE, 140401314877440, 140401314881535,
+STORE, 140401314881536, 140401323270143,
+SNULL, 140401323274240, 140401331662847,
+STORE, 140401331662848, 140401340055551,
+STORE, 140401323274240, 140401331662847,
+SNULL, 140401331666943, 140401340055551,
+STORE, 140401331662848, 140401331666943,
+STORE, 140401331666944, 140401340055551,
+SNULL, 140401298096127, 140401306484735,
+STORE, 140401298092032, 140401298096127,
+STORE, 140401298096128, 140401306484735,
+STORE, 140401289699328, 140401298092031,
+STORE, 140401281306624, 140401298092031,
+STORE, 140401130336256, 140401138728959,
+SNULL, 140401281306624, 140401289699327,
+STORE, 140401289699328, 140401298092031,
+STORE, 140401281306624, 140401289699327,
+SNULL, 140401289703423, 140401298092031,
+STORE, 140401289699328, 140401289703423,
+STORE, 140401289703424, 140401298092031,
+STORE, 140401121943552, 140401138728959,
+STORE, 140401113550848, 140401138728959,
+SNULL, 140401281310719, 140401289699327,
+STORE, 140401281306624, 140401281310719,
+STORE, 140401281310720, 140401289699327,
+SNULL, 140401113550848, 140401121943551,
+STORE, 140401121943552, 140401138728959,
+STORE, 140401113550848, 140401121943551,
+SNULL, 140401121947647, 140401138728959,
+STORE, 140401121943552, 140401121947647,
+STORE, 140401121947648, 140401138728959,
+STORE, 140401105158144, 140401121943551,
+SNULL, 140401121947648, 140401130336255,
+STORE, 140401130336256, 140401138728959,
+STORE, 140401121947648, 140401130336255,
+SNULL, 140401130340351, 140401138728959,
+STORE, 140401130336256, 140401130340351,
+STORE, 140401130340352, 140401138728959,
+STORE, 140401096765440, 140401121943551,
+SNULL, 140401096765440, 140401113550847,
+STORE, 140401113550848, 140401121943551,
+STORE, 140401096765440, 140401113550847,
+SNULL, 140401113554943, 140401121943551,
+STORE, 140401113550848, 140401113554943,
+STORE, 140401113554944, 140401121943551,
+STORE, 140401088372736, 140401113550847,
+SNULL, 140401088372736, 140401096765439,
+STORE, 140401096765440, 140401113550847,
+STORE, 140401088372736, 140401096765439,
+SNULL, 140401096769535, 140401113550847,
+STORE, 140401096765440, 140401096769535,
+STORE, 140401096769536, 140401113550847,
+SNULL, 140401096769536, 140401105158143,
+STORE, 140401105158144, 140401113550847,
+STORE, 140401096769536, 140401105158143,
+SNULL, 140401105162239, 140401113550847,
+STORE, 140401105158144, 140401105162239,
+STORE, 140401105162240, 140401113550847,
+SNULL, 140401088376831, 140401096765439,
+STORE, 140401088372736, 140401088376831,
+STORE, 140401088376832, 140401096765439,
+STORE, 140401079980032, 140401088372735,
+STORE, 140400996118528, 140401004511231,
+SNULL, 140401079984127, 140401088372735,
+STORE, 140401079980032, 140401079984127,
+STORE, 140401079984128, 140401088372735,
+SNULL, 140400996122623, 140401004511231,
+STORE, 140400996118528, 140400996122623,
+STORE, 140400996122624, 140401004511231,
+STORE, 140400987725824, 140400996118527,
+STORE, 140400979333120, 140400996118527,
+STORE, 140400803184640, 140400870293503,
+SNULL, 140400803319807, 140400870293503,
+STORE, 140400803184640, 140400803319807,
+STORE, 140400803319808, 140400870293503,
+SNULL, 140400979333120, 140400987725823,
+STORE, 140400987725824, 140400996118527,
+STORE, 140400979333120, 140400987725823,
+SNULL, 140400987729919, 140400996118527,
+STORE, 140400987725824, 140400987729919,
+STORE, 140400987729920, 140400996118527,
+STORE, 140400970940416, 140400987725823,
+STORE, 140400962547712, 140400987725823,
+STORE, 140400668966912, 140400803184639,
+STORE, 140400954155008, 140400987725823,
+STORE, 140400945762304, 140400987725823,
+STORE, 140400660574208, 140400668966911,
+STORE, 140400593465344, 140400660574207,
+STORE, 140400585072640, 140400593465343,
+STORE, 140400450854912, 140400585072639,
+STORE, 140400442462208, 140400450854911,
+STORE, 140400434069504, 140400450854911,
+STORE, 140400299851776, 140400434069503,
+STORE, 140400291459072, 140400299851775,
+SNULL, 140400299851776, 140400333422591,
+STORE, 140400333422592, 140400434069503,
+STORE, 140400299851776, 140400333422591,
+ERASE, 140400299851776, 140400333422591,
+STORE, 140400325029888, 140400333422591,
+STORE, 140400157241344, 140400291459071,
+STORE, 140400316637184, 140400333422591,
+STORE, 140400308244480, 140400333422591,
+STORE, 140400023023616, 140400291459071,
+STORE, 140400291459072, 140400333422591,
+SNULL, 140400023023616, 140400064987135,
+STORE, 140400064987136, 140400291459071,
+STORE, 140400023023616, 140400064987135,
+ERASE, 140400023023616, 140400064987135,
+STORE, 140400056594432, 140400064987135,
+SNULL, 140400056598527, 140400064987135,
+STORE, 140400056594432, 140400056598527,
+STORE, 140400056598528, 140400064987135,
+STORE, 140399989485568, 140400056594431,
+SNULL, 140400291459072, 140400316637183,
+STORE, 140400316637184, 140400333422591,
+STORE, 140400291459072, 140400316637183,
+SNULL, 140400316641279, 140400333422591,
+STORE, 140400316637184, 140400316641279,
+STORE, 140400316641280, 140400333422591,
+STORE, 140399855267840, 140400056594431,
+SNULL, 140399855267840, 140399863660543,
+STORE, 140399863660544, 140400056594431,
+STORE, 140399855267840, 140399863660543,
+ERASE, 140399855267840, 140399863660543,
+SNULL, 140400736075775, 140400803184639,
+STORE, 140400668966912, 140400736075775,
+STORE, 140400736075776, 140400803184639,
+ERASE, 140400736075776, 140400803184639,
+SNULL, 140400669102079, 140400736075775,
+STORE, 140400668966912, 140400669102079,
+STORE, 140400669102080, 140400736075775,
+STORE, 140400669102080, 140400803184639,
+SNULL, 140400669102080, 140400736075775,
+STORE, 140400736075776, 140400803184639,
+STORE, 140400669102080, 140400736075775,
+SNULL, 140400736210943, 140400803184639,
+STORE, 140400736075776, 140400736210943,
+STORE, 140400736210944, 140400803184639,
+ERASE, 140400593465344, 140400660574207,
+SNULL, 140400450854912, 140400467640319,
+STORE, 140400467640320, 140400585072639,
+STORE, 140400450854912, 140400467640319,
+ERASE, 140400450854912, 140400467640319,
+STORE, 140399729442816, 140400056594431,
+SNULL, 140400400531455, 140400434069503,
+STORE, 140400333422592, 140400400531455,
+STORE, 140400400531456, 140400434069503,
+ERASE, 140400400531456, 140400434069503,
+SNULL, 140400333557759, 140400400531455,
+STORE, 140400333422592, 140400333557759,
+STORE, 140400333557760, 140400400531455,
+SNULL, 140400157241343, 140400291459071,
+STORE, 140400064987136, 140400157241343,
+STORE, 140400157241344, 140400291459071,
+SNULL, 140400157241344, 140400199204863,
+STORE, 140400199204864, 140400291459071,
+STORE, 140400157241344, 140400199204863,
+ERASE, 140400157241344, 140400199204863,
+SNULL, 140400266313727, 140400291459071,
+STORE, 140400199204864, 140400266313727,
+STORE, 140400266313728, 140400291459071,
+ERASE, 140400266313728, 140400291459071,
+SNULL, 140400132095999, 140400157241343,
+STORE, 140400064987136, 140400132095999,
+STORE, 140400132096000, 140400157241343,
+ERASE, 140400132096000, 140400157241343,
+SNULL, 140400065122303, 140400132095999,
+STORE, 140400064987136, 140400065122303,
+STORE, 140400065122304, 140400132095999,
+SNULL, 140400945762304, 140400954155007,
+STORE, 140400954155008, 140400987725823,
+STORE, 140400945762304, 140400954155007,
+SNULL, 140400954159103, 140400987725823,
+STORE, 140400954155008, 140400954159103,
+STORE, 140400954159104, 140400987725823,
+SNULL, 140400434069504, 140400442462207,
+STORE, 140400442462208, 140400450854911,
+STORE, 140400434069504, 140400442462207,
+SNULL, 140400442466303, 140400450854911,
+STORE, 140400442462208, 140400442466303,
+STORE, 140400442466304, 140400450854911,
+SNULL, 140400291463167, 140400316637183,
+STORE, 140400291459072, 140400291463167,
+STORE, 140400291463168, 140400316637183,
+STORE, 140400652181504, 140400668966911,
+STORE, 140400643788800, 140400668966911,
+SNULL, 140400291463168, 140400299851775,
+STORE, 140400299851776, 140400316637183,
+STORE, 140400291463168, 140400299851775,
+SNULL, 140400299855871, 140400316637183,
+STORE, 140400299851776, 140400299855871,
+STORE, 140400299855872, 140400316637183,
+STORE, 140400635396096, 140400668966911,
+SNULL, 140400635396096, 140400643788799,
+STORE, 140400643788800, 140400668966911,
+STORE, 140400635396096, 140400643788799,
+SNULL, 140400643792895, 140400668966911,
+STORE, 140400643788800, 140400643792895,
+STORE, 140400643792896, 140400668966911,
+SNULL, 140399989485567, 140400056594431,
+STORE, 140399729442816, 140399989485567,
+STORE, 140399989485568, 140400056594431,
+ERASE, 140399989485568, 140400056594431,
+SNULL, 140399930769407, 140399989485567,
+STORE, 140399729442816, 140399930769407,
+STORE, 140399930769408, 140399989485567,
+ERASE, 140399930769408, 140399989485567,
+SNULL, 140400945766399, 140400954155007,
+STORE, 140400945762304, 140400945766399,
+STORE, 140400945766400, 140400954155007,
+SNULL, 140400534749183, 140400585072639,
+STORE, 140400467640320, 140400534749183,
+STORE, 140400534749184, 140400585072639,
+ERASE, 140400534749184, 140400585072639,
+SNULL, 140399796551679, 140399930769407,
+STORE, 140399729442816, 140399796551679,
+STORE, 140399796551680, 140399930769407,
+SNULL, 140399796551680, 140399863660543,
+STORE, 140399863660544, 140399930769407,
+STORE, 140399796551680, 140399863660543,
+ERASE, 140399796551680, 140399863660543,
+SNULL, 140400199340031, 140400266313727,
+STORE, 140400199204864, 140400199340031,
+STORE, 140400199340032, 140400266313727,
+STORE, 140400627003392, 140400643788799,
+SNULL, 140400316641280, 140400325029887,
+STORE, 140400325029888, 140400333422591,
+STORE, 140400316641280, 140400325029887,
+SNULL, 140400325033983, 140400333422591,
+STORE, 140400325029888, 140400325033983,
+STORE, 140400325033984, 140400333422591,
+SNULL, 140400627003392, 140400635396095,
+STORE, 140400635396096, 140400643788799,
+STORE, 140400627003392, 140400635396095,
+SNULL, 140400635400191, 140400643788799,
+STORE, 140400635396096, 140400635400191,
+STORE, 140400635400192, 140400643788799,
+SNULL, 140400434073599, 140400442462207,
+STORE, 140400434069504, 140400434073599,
+STORE, 140400434073600, 140400442462207,
+STORE, 140400618610688, 140400635396095,
+STORE, 140400610217984, 140400635396095,
+SNULL, 140400954159104, 140400962547711,
+STORE, 140400962547712, 140400987725823,
+STORE, 140400954159104, 140400962547711,
+SNULL, 140400962551807, 140400987725823,
+STORE, 140400962547712, 140400962551807,
+STORE, 140400962551808, 140400987725823,
+SNULL, 140400299855872, 140400308244479,
+STORE, 140400308244480, 140400316637183,
+STORE, 140400299855872, 140400308244479,
+SNULL, 140400308248575, 140400316637183,
+STORE, 140400308244480, 140400308248575,
+STORE, 140400308248576, 140400316637183,
+STORE, 140400601825280, 140400635396095,
+SNULL, 140400601829375, 140400635396095,
+STORE, 140400601825280, 140400601829375,
+STORE, 140400601829376, 140400635396095,
+STORE, 140400576679936, 140400593465343,
+SNULL, 140400576684031, 140400593465343,
+STORE, 140400576679936, 140400576684031,
+STORE, 140400576684032, 140400593465343,
+SNULL, 140400643792896, 140400652181503,
+STORE, 140400652181504, 140400668966911,
+STORE, 140400643792896, 140400652181503,
+SNULL, 140400652185599, 140400668966911,
+STORE, 140400652181504, 140400652185599,
+STORE, 140400652185600, 140400668966911,
+STORE, 140399595225088, 140399796551679,
+SNULL, 140399662333951, 140399796551679,
+STORE, 140399595225088, 140399662333951,
+STORE, 140399662333952, 140399796551679,
+SNULL, 140399662333952, 140399729442815,
+STORE, 140399729442816, 140399796551679,
+STORE, 140399662333952, 140399729442815,
+ERASE, 140399662333952, 140399729442815,
+SNULL, 140399863795711, 140399930769407,
+STORE, 140399863660544, 140399863795711,
+STORE, 140399863795712, 140399930769407,
+STORE, 140400568287232, 140400576679935,
+SNULL, 140400568291327, 140400576679935,
+STORE, 140400568287232, 140400568291327,
+STORE, 140400568291328, 140400576679935,
+SNULL, 140400467775487, 140400534749183,
+STORE, 140400467640320, 140400467775487,
+STORE, 140400467775488, 140400534749183,
+SNULL, 140399729577983, 140399796551679,
+STORE, 140399729442816, 140399729577983,
+STORE, 140399729577984, 140399796551679,
+SNULL, 140400601829376, 140400627003391,
+STORE, 140400627003392, 140400635396095,
+STORE, 140400601829376, 140400627003391,
+SNULL, 140400627007487, 140400635396095,
+STORE, 140400627003392, 140400627007487,
+STORE, 140400627007488, 140400635396095,
+STORE, 140400559894528, 140400568287231,
+STORE, 140400551501824, 140400568287231,
+STORE, 140400543109120, 140400568287231,
+STORE, 140400459247616, 140400467640319,
+STORE, 140400442466304, 140400467640319,
+SNULL, 140399595360255, 140399662333951,
+STORE, 140399595225088, 140399595360255,
+STORE, 140399595360256, 140399662333951,
+SNULL, 140400962551808, 140400970940415,
+STORE, 140400970940416, 140400987725823,
+STORE, 140400962551808, 140400970940415,
+SNULL, 140400970944511, 140400987725823,
+STORE, 140400970940416, 140400970944511,
+STORE, 140400970944512, 140400987725823,
+SNULL, 140400652185600, 140400660574207,
+STORE, 140400660574208, 140400668966911,
+STORE, 140400652185600, 140400660574207,
+SNULL, 140400660578303, 140400668966911,
+STORE, 140400660574208, 140400660578303,
+STORE, 140400660578304, 140400668966911,
+SNULL, 140400576684032, 140400585072639,
+STORE, 140400585072640, 140400593465343,
+STORE, 140400576684032, 140400585072639,
+SNULL, 140400585076735, 140400593465343,
+STORE, 140400585072640, 140400585076735,
+STORE, 140400585076736, 140400593465343,
+STORE, 140400425676800, 140400434069503,
+STORE, 140400417284096, 140400434069503,
+STORE, 140400408891392, 140400434069503,
+SNULL, 140400408891392, 140400417284095,
+STORE, 140400417284096, 140400434069503,
+STORE, 140400408891392, 140400417284095,
+SNULL, 140400417288191, 140400434069503,
+STORE, 140400417284096, 140400417288191,
+STORE, 140400417288192, 140400434069503,
+STORE, 140400283066368, 140400291459071,
+SNULL, 140400601829376, 140400618610687,
+STORE, 140400618610688, 140400627003391,
+STORE, 140400601829376, 140400618610687,
+SNULL, 140400618614783, 140400627003391,
+STORE, 140400618610688, 140400618614783,
+STORE, 140400618614784, 140400627003391,
+SNULL, 140400601829376, 140400610217983,
+STORE, 140400610217984, 140400618610687,
+STORE, 140400601829376, 140400610217983,
+SNULL, 140400610222079, 140400618610687,
+STORE, 140400610217984, 140400610222079,
+STORE, 140400610222080, 140400618610687,
+STORE, 140400274673664, 140400291459071,
+STORE, 140400190812160, 140400199204863,
+STORE, 140400182419456, 140400199204863,
+SNULL, 140400442466304, 140400450854911,
+STORE, 140400450854912, 140400467640319,
+STORE, 140400442466304, 140400450854911,
+SNULL, 140400450859007, 140400467640319,
+STORE, 140400450854912, 140400450859007,
+STORE, 140400450859008, 140400467640319,
+SNULL, 140400543109120, 140400559894527,
+STORE, 140400559894528, 140400568287231,
+STORE, 140400543109120, 140400559894527,
+SNULL, 140400559898623, 140400568287231,
+STORE, 140400559894528, 140400559898623,
+STORE, 140400559898624, 140400568287231,
+SNULL, 140400450859008, 140400459247615,
+STORE, 140400459247616, 140400467640319,
+STORE, 140400450859008, 140400459247615,
+SNULL, 140400459251711, 140400467640319,
+STORE, 140400459247616, 140400459251711,
+STORE, 140400459251712, 140400467640319,
+SNULL, 140400543113215, 140400559894527,
+STORE, 140400543109120, 140400543113215,
+STORE, 140400543113216, 140400559894527,
+SNULL, 140400970944512, 140400979333119,
+STORE, 140400979333120, 140400987725823,
+STORE, 140400970944512, 140400979333119,
+SNULL, 140400979337215, 140400987725823,
+STORE, 140400979333120, 140400979337215,
+STORE, 140400979337216, 140400987725823,
+STORE, 140400174026752, 140400199204863,
+SNULL, 140400174030847, 140400199204863,
+STORE, 140400174026752, 140400174030847,
+STORE, 140400174030848, 140400199204863,
+SNULL, 140400274673664, 140400283066367,
+STORE, 140400283066368, 140400291459071,
+STORE, 140400274673664, 140400283066367,
+SNULL, 140400283070463, 140400291459071,
+STORE, 140400283066368, 140400283070463,
+STORE, 140400283070464, 140400291459071,
+STORE, 140400165634048, 140400174026751,
+SNULL, 140400165638143, 140400174026751,
+STORE, 140400165634048, 140400165638143,
+STORE, 140400165638144, 140400174026751,
+SNULL, 140400174030848, 140400182419455,
+STORE, 140400182419456, 140400199204863,
+STORE, 140400174030848, 140400182419455,
+SNULL, 140400182423551, 140400199204863,
+STORE, 140400182419456, 140400182423551,
+STORE, 140400182423552, 140400199204863,
+SNULL, 140400182423552, 140400190812159,
+STORE, 140400190812160, 140400199204863,
+STORE, 140400182423552, 140400190812159,
+SNULL, 140400190816255, 140400199204863,
+STORE, 140400190812160, 140400190816255,
+STORE, 140400190816256, 140400199204863,
+STORE, 140400157241344, 140400165634047,
+SNULL, 140400157245439, 140400165634047,
+STORE, 140400157241344, 140400157245439,
+STORE, 140400157245440, 140400165634047,
+SNULL, 140400408895487, 140400417284095,
+STORE, 140400408891392, 140400408895487,
+STORE, 140400408895488, 140400417284095,
+SNULL, 140400417288192, 140400425676799,
+STORE, 140400425676800, 140400434069503,
+STORE, 140400417288192, 140400425676799,
+SNULL, 140400425680895, 140400434069503,
+STORE, 140400425676800, 140400425680895,
+STORE, 140400425680896, 140400434069503,
+STORE, 140400148848640, 140400157241343,
+SNULL, 140400148852735, 140400157241343,
+STORE, 140400148848640, 140400148852735,
+STORE, 140400148852736, 140400157241343,
+SNULL, 140400543113216, 140400551501823,
+STORE, 140400551501824, 140400559894527,
+STORE, 140400543113216, 140400551501823,
+SNULL, 140400551505919, 140400559894527,
+STORE, 140400551501824, 140400551505919,
+STORE, 140400551505920, 140400559894527,
+STORE, 140400140455936, 140400148848639,
+STORE, 140400048201728, 140400056594431,
+SNULL, 140400140460031, 140400148848639,
+STORE, 140400140455936, 140400140460031,
+STORE, 140400140460032, 140400148848639,
+STORE, 140400039809024, 140400056594431,
+SNULL, 140400039813119, 140400056594431,
+STORE, 140400039809024, 140400039813119,
+STORE, 140400039813120, 140400056594431,
+STORE, 140400031416320, 140400039809023,
+STORE, 140400023023616, 140400039809023,
+SNULL, 140400274677759, 140400283066367,
+STORE, 140400274673664, 140400274677759,
+STORE, 140400274677760, 140400283066367,
+STORE, 140400014630912, 140400039809023,
+STORE, 140400006238208, 140400039809023,
+STORE, 140399997845504, 140400039809023,
+SNULL, 140399997849599, 140400039809023,
+STORE, 140399997845504, 140399997849599,
+STORE, 140399997849600, 140400039809023,
+STORE, 140399989452800, 140399997845503,
+SNULL, 140399989456895, 140399997845503,
+STORE, 140399989452800, 140399989456895,
+STORE, 140399989456896, 140399997845503,
+STORE, 140399981060096, 140399989452799,
+SNULL, 140399981064191, 140399989452799,
+STORE, 140399981060096, 140399981064191,
+STORE, 140399981064192, 140399989452799,
+STORE, 140399972667392, 140399981060095,
+STORE, 140399964274688, 140399981060095,
+SNULL, 140399964278783, 140399981060095,
+STORE, 140399964274688, 140399964278783,
+STORE, 140399964278784, 140399981060095,
+SNULL, 140400039813120, 140400048201727,
+STORE, 140400048201728, 140400056594431,
+STORE, 140400039813120, 140400048201727,
+SNULL, 140400048205823, 140400056594431,
+STORE, 140400048201728, 140400048205823,
+STORE, 140400048205824, 140400056594431,
+SNULL, 140399997849600, 140400031416319,
+STORE, 140400031416320, 140400039809023,
+STORE, 140399997849600, 140400031416319,
+SNULL, 140400031420415, 140400039809023,
+STORE, 140400031416320, 140400031420415,
+STORE, 140400031420416, 140400039809023,
+STORE, 140399955881984, 140399964274687,
+SNULL, 140399955886079, 140399964274687,
+STORE, 140399955881984, 140399955886079,
+STORE, 140399955886080, 140399964274687,
+STORE, 140399947489280, 140399955881983,
+STORE, 140399939096576, 140399955881983,
+STORE, 140399855267840, 140399863660543,
+SNULL, 140399939100671, 140399955881983,
+STORE, 140399939096576, 140399939100671,
+STORE, 140399939100672, 140399955881983,
+SNULL, 140399997849600, 140400014630911,
+STORE, 140400014630912, 140400031416319,
+STORE, 140399997849600, 140400014630911,
+SNULL, 140400014635007, 140400031416319,
+STORE, 140400014630912, 140400014635007,
+STORE, 140400014635008, 140400031416319,
+SNULL, 140400014635008, 140400023023615,
+STORE, 140400023023616, 140400031416319,
+STORE, 140400014635008, 140400023023615,
+SNULL, 140400023027711, 140400031416319,
+STORE, 140400023023616, 140400023027711,
+STORE, 140400023027712, 140400031416319,
+SNULL, 140399997849600, 140400006238207,
+STORE, 140400006238208, 140400014630911,
+STORE, 140399997849600, 140400006238207,
+SNULL, 140400006242303, 140400014630911,
+STORE, 140400006238208, 140400006242303,
+STORE, 140400006242304, 140400014630911,
+STORE, 140399846875136, 140399863660543,
+STORE, 140399838482432, 140399863660543,
+SNULL, 140399838486527, 140399863660543,
+STORE, 140399838482432, 140399838486527,
+STORE, 140399838486528, 140399863660543,
+SNULL, 140399939100672, 140399947489279,
+STORE, 140399947489280, 140399955881983,
+STORE, 140399939100672, 140399947489279,
+SNULL, 140399947493375, 140399955881983,
+STORE, 140399947489280, 140399947493375,
+STORE, 140399947493376, 140399955881983,
+SNULL, 140399964278784, 140399972667391,
+STORE, 140399972667392, 140399981060095,
+STORE, 140399964278784, 140399972667391,
+SNULL, 140399972671487, 140399981060095,
+STORE, 140399972667392, 140399972671487,
+STORE, 140399972671488, 140399981060095,
+SNULL, 140399838486528, 140399855267839,
+STORE, 140399855267840, 140399863660543,
+STORE, 140399838486528, 140399855267839,
+SNULL, 140399855271935, 140399863660543,
+STORE, 140399855267840, 140399855271935,
+STORE, 140399855271936, 140399863660543,
+STORE, 140399830089728, 140399838482431,
+SNULL, 140399830093823, 140399838482431,
+STORE, 140399830089728, 140399830093823,
+STORE, 140399830093824, 140399838482431,
+STORE, 140399821697024, 140399830089727,
+SNULL, 140399821701119, 140399830089727,
+STORE, 140399821697024, 140399821701119,
+STORE, 140399821701120, 140399830089727,
+SNULL, 140399838486528, 140399846875135,
+STORE, 140399846875136, 140399855267839,
+STORE, 140399838486528, 140399846875135,
+SNULL, 140399846879231, 140399855267839,
+STORE, 140399846875136, 140399846879231,
+STORE, 140399846879232, 140399855267839,
+STORE, 140399813304320, 140399821697023,
+STORE, 140399804911616, 140399821697023,
+SNULL, 140399804915711, 140399821697023,
+STORE, 140399804911616, 140399804915711,
+STORE, 140399804915712, 140399821697023,
+STORE, 140399721050112, 140399729442815,
+SNULL, 140399804915712, 140399813304319,
+STORE, 140399813304320, 140399821697023,
+STORE, 140399804915712, 140399813304319,
+SNULL, 140399813308415, 140399821697023,
+STORE, 140399813304320, 140399813308415,
+STORE, 140399813308416, 140399821697023,
+SNULL, 140399721054207, 140399729442815,
+STORE, 140399721050112, 140399721054207,
+STORE, 140399721054208, 140399729442815,
+STORE, 140401467105280, 140401467133951,
+STORE, 140401279115264, 140401281306623,
+SNULL, 140401279115264, 140401279205375,
+STORE, 140401279205376, 140401281306623,
+STORE, 140401279115264, 140401279205375,
+SNULL, 140401281298431, 140401281306623,
+STORE, 140401279205376, 140401281298431,
+STORE, 140401281298432, 140401281306623,
+ERASE, 140401281298432, 140401281306623,
+STORE, 140401281298432, 140401281306623,
+SNULL, 140401281302527, 140401281306623,
+STORE, 140401281298432, 140401281302527,
+STORE, 140401281302528, 140401281306623,
+ERASE, 140401467105280, 140401467133951,
+ERASE, 140400056594432, 140400056598527,
+ERASE, 140400056598528, 140400064987135,
+ERASE, 140400635396096, 140400635400191,
+ERASE, 140400635400192, 140400643788799,
+ERASE, 140400408891392, 140400408895487,
+ERASE, 140400408895488, 140400417284095,
+ERASE, 140400299851776, 140400299855871,
+ERASE, 140400299855872, 140400308244479,
+ERASE, 140400627003392, 140400627007487,
+ERASE, 140400627007488, 140400635396095,
+ERASE, 140400954155008, 140400954159103,
+ERASE, 140400954159104, 140400962547711,
+ERASE, 140400291459072, 140400291463167,
+ERASE, 140400291463168, 140400299851775,
+ERASE, 140400643788800, 140400643792895,
+ERASE, 140400643792896, 140400652181503,
+ERASE, 140400325029888, 140400325033983,
+ERASE, 140400325033984, 140400333422591,
+ERASE, 140400610217984, 140400610222079,
+ERASE, 140400610222080, 140400618610687,
+ERASE, 140400190812160, 140400190816255,
+ERASE, 140400190816256, 140400199204863,
+ERASE, 140399964274688, 140399964278783,
+ERASE, 140399964278784, 140399972667391,
+ERASE, 140400945762304, 140400945766399,
+ERASE, 140400945766400, 140400954155007,
+ERASE, 140400568287232, 140400568291327,
+ERASE, 140400568291328, 140400576679935,
+ERASE, 140399972667392, 140399972671487,
+ERASE, 140399972671488, 140399981060095,
+ERASE, 140400962547712, 140400962551807,
+ERASE, 140400962551808, 140400970940415,
+ERASE, 140400987725824, 140400987729919,
+ERASE, 140400987729920, 140400996118527,
+ERASE, 140400652181504, 140400652185599,
+ERASE, 140400652185600, 140400660574207,
+ERASE, 140400450854912, 140400450859007,
+ERASE, 140400450859008, 140400459247615,
+ERASE, 140400031416320, 140400031420415,
+ERASE, 140400031420416, 140400039809023,
+ERASE, 140400308244480, 140400308248575,
+ERASE, 140400308248576, 140400316637183,
+ERASE, 140400434069504, 140400434073599,
+ERASE, 140400434073600, 140400442462207,
+ERASE, 140400543109120, 140400543113215,
+ERASE, 140400543113216, 140400551501823,
+ERASE, 140400023023616, 140400023027711,
+ERASE, 140400023027712, 140400031416319,
+ERASE, 140399813304320, 140399813308415,
+ERASE, 140399813308416, 140399821697023,
+ERASE, 140400316637184, 140400316641279,
+ERASE, 140400316641280, 140400325029887,
+ERASE, 140400585072640, 140400585076735,
+ERASE, 140400585076736, 140400593465343,
+ERASE, 140400148848640, 140400148852735,
+ERASE, 140400148852736, 140400157241343,
+ERASE, 140399955881984, 140399955886079,
+ERASE, 140399955886080, 140399964274687,
+ERASE, 140399821697024, 140399821701119,
+ERASE, 140399821701120, 140399830089727,
+ERASE, 140400601825280, 140400601829375,
+ERASE, 140400601829376, 140400610217983,
+ERASE, 140400979333120, 140400979337215,
+ERASE, 140400979337216, 140400987725823,
+ERASE, 140399997845504, 140399997849599,
+ERASE, 140399997849600, 140400006238207,
+ERASE, 140400459247616, 140400459251711,
+ERASE, 140400459251712, 140400467640319,
+ERASE, 140400551501824, 140400551505919,
+ERASE, 140400551505920, 140400559894527,
+ERASE, 140399939096576, 140399939100671,
+ERASE, 140399939100672, 140399947489279,
+ERASE, 140400442462208, 140400442466303,
+ERASE, 140400442466304, 140400450854911,
+ERASE, 140400576679936, 140400576684031,
+ERASE, 140400576684032, 140400585072639,
+ERASE, 140400559894528, 140400559898623,
+ERASE, 140400559898624, 140400568287231,
+ERASE, 140400417284096, 140400417288191,
+ERASE, 140400417288192, 140400425676799,
+ERASE, 140400283066368, 140400283070463,
+ERASE, 140400283070464, 140400291459071,
+ };
+ unsigned long set33[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140734562918400, 140737488351231,
+SNULL, 140734562922495, 140737488351231,
+STORE, 140734562918400, 140734562922495,
+STORE, 140734562787328, 140734562922495,
+STORE, 94133878984704, 94133881237503,
+SNULL, 94133879115775, 94133881237503,
+STORE, 94133878984704, 94133879115775,
+STORE, 94133879115776, 94133881237503,
+ERASE, 94133879115776, 94133881237503,
+STORE, 94133881208832, 94133881217023,
+STORE, 94133881217024, 94133881237503,
+STORE, 140583654043648, 140583656296447,
+SNULL, 140583654187007, 140583656296447,
+STORE, 140583654043648, 140583654187007,
+STORE, 140583654187008, 140583656296447,
+ERASE, 140583654187008, 140583656296447,
+STORE, 140583656284160, 140583656292351,
+STORE, 140583656292352, 140583656296447,
+STORE, 140734564319232, 140734564323327,
+STORE, 140734564306944, 140734564319231,
+STORE, 140583656255488, 140583656284159,
+STORE, 140583656247296, 140583656255487,
+STORE, 140583651827712, 140583654043647,
+SNULL, 140583651827712, 140583651926015,
+STORE, 140583651926016, 140583654043647,
+STORE, 140583651827712, 140583651926015,
+SNULL, 140583654019071, 140583654043647,
+STORE, 140583651926016, 140583654019071,
+STORE, 140583654019072, 140583654043647,
+SNULL, 140583654019072, 140583654027263,
+STORE, 140583654027264, 140583654043647,
+STORE, 140583654019072, 140583654027263,
+ERASE, 140583654019072, 140583654027263,
+STORE, 140583654019072, 140583654027263,
+ERASE, 140583654027264, 140583654043647,
+STORE, 140583654027264, 140583654043647,
+STORE, 140583648030720, 140583651827711,
+SNULL, 140583648030720, 140583649689599,
+STORE, 140583649689600, 140583651827711,
+STORE, 140583648030720, 140583649689599,
+SNULL, 140583651786751, 140583651827711,
+STORE, 140583649689600, 140583651786751,
+STORE, 140583651786752, 140583651827711,
+SNULL, 140583651786752, 140583651811327,
+STORE, 140583651811328, 140583651827711,
+STORE, 140583651786752, 140583651811327,
+ERASE, 140583651786752, 140583651811327,
+STORE, 140583651786752, 140583651811327,
+ERASE, 140583651811328, 140583651827711,
+STORE, 140583651811328, 140583651827711,
+STORE, 140583656239104, 140583656255487,
+SNULL, 140583651803135, 140583651811327,
+STORE, 140583651786752, 140583651803135,
+STORE, 140583651803136, 140583651811327,
+SNULL, 140583654023167, 140583654027263,
+STORE, 140583654019072, 140583654023167,
+STORE, 140583654023168, 140583654027263,
+SNULL, 94133881212927, 94133881217023,
+STORE, 94133881208832, 94133881212927,
+STORE, 94133881212928, 94133881217023,
+SNULL, 140583656288255, 140583656292351,
+STORE, 140583656284160, 140583656288255,
+STORE, 140583656288256, 140583656292351,
+ERASE, 140583656255488, 140583656284159,
+STORE, 94133881733120, 94133881868287,
+STORE, 140583639638016, 140583648030719,
+SNULL, 140583639642111, 140583648030719,
+STORE, 140583639638016, 140583639642111,
+STORE, 140583639642112, 140583648030719,
+STORE, 140583631245312, 140583639638015,
+STORE, 140583497027584, 140583631245311,
+SNULL, 140583497027584, 140583540621311,
+STORE, 140583540621312, 140583631245311,
+STORE, 140583497027584, 140583540621311,
+ERASE, 140583497027584, 140583540621311,
+SNULL, 140583607730175, 140583631245311,
+STORE, 140583540621312, 140583607730175,
+STORE, 140583607730176, 140583631245311,
+ERASE, 140583607730176, 140583631245311,
+SNULL, 140583540756479, 140583607730175,
+STORE, 140583540621312, 140583540756479,
+STORE, 140583540756480, 140583607730175,
+SNULL, 140583631249407, 140583639638015,
+STORE, 140583631245312, 140583631249407,
+STORE, 140583631249408, 140583639638015,
+STORE, 140583622852608, 140583631245311,
+SNULL, 140583622856703, 140583631245311,
+STORE, 140583622852608, 140583622856703,
+STORE, 140583622856704, 140583631245311,
+STORE, 140583614459904, 140583622852607,
+SNULL, 140583614463999, 140583622852607,
+STORE, 140583614459904, 140583614463999,
+STORE, 140583614464000, 140583622852607,
+STORE, 140583532228608, 140583540621311,
+SNULL, 140583532232703, 140583540621311,
+STORE, 140583532228608, 140583532232703,
+STORE, 140583532232704, 140583540621311,
+STORE, 140583523835904, 140583532228607,
+STORE, 140583515443200, 140583532228607,
+STORE, 140583507050496, 140583532228607,
+STORE, 140583372832768, 140583507050495,
+STORE, 140583364440064, 140583372832767,
+STORE, 140583230222336, 140583364440063,
+STORE, 140583096004608, 140583364440063,
+SNULL, 140583230222335, 140583364440063,
+STORE, 140583096004608, 140583230222335,
+STORE, 140583230222336, 140583364440063,
+SNULL, 140583230222336, 140583272185855,
+STORE, 140583272185856, 140583364440063,
+STORE, 140583230222336, 140583272185855,
+ERASE, 140583230222336, 140583272185855,
+STORE, 140582961786880, 140583230222335,
+SNULL, 140583372832768, 140583406403583,
+STORE, 140583406403584, 140583507050495,
+STORE, 140583372832768, 140583406403583,
+ERASE, 140583372832768, 140583406403583,
+SNULL, 140583473512447, 140583507050495,
+STORE, 140583406403584, 140583473512447,
+STORE, 140583473512448, 140583507050495,
+ERASE, 140583473512448, 140583507050495,
+SNULL, 140583096004607, 140583230222335,
+STORE, 140582961786880, 140583096004607,
+STORE, 140583096004608, 140583230222335,
+SNULL, 140583096004608, 140583137968127,
+STORE, 140583137968128, 140583230222335,
+STORE, 140583096004608, 140583137968127,
+ERASE, 140583096004608, 140583137968127,
+SNULL, 140583339294719, 140583364440063,
+STORE, 140583272185856, 140583339294719,
+STORE, 140583339294720, 140583364440063,
+ERASE, 140583339294720, 140583364440063,
+SNULL, 140583272321023, 140583339294719,
+STORE, 140583272185856, 140583272321023,
+STORE, 140583272321024, 140583339294719,
+SNULL, 140582961786880, 140583003750399,
+STORE, 140583003750400, 140583096004607,
+STORE, 140582961786880, 140583003750399,
+ERASE, 140582961786880, 140583003750399,
+ };
+
+ unsigned long set34[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140731327180800, 140737488351231,
+SNULL, 140731327184895, 140737488351231,
+STORE, 140731327180800, 140731327184895,
+STORE, 140731327049728, 140731327184895,
+STORE, 94632924487680, 94632926740479,
+SNULL, 94632924618751, 94632926740479,
+STORE, 94632924487680, 94632924618751,
+STORE, 94632924618752, 94632926740479,
+ERASE, 94632924618752, 94632926740479,
+STORE, 94632926711808, 94632926719999,
+STORE, 94632926720000, 94632926740479,
+STORE, 140012544888832, 140012547141631,
+SNULL, 140012545032191, 140012547141631,
+STORE, 140012544888832, 140012545032191,
+STORE, 140012545032192, 140012547141631,
+ERASE, 140012545032192, 140012547141631,
+STORE, 140012547129344, 140012547137535,
+STORE, 140012547137536, 140012547141631,
+STORE, 140731327725568, 140731327729663,
+STORE, 140731327713280, 140731327725567,
+STORE, 140012547100672, 140012547129343,
+STORE, 140012547092480, 140012547100671,
+STORE, 140012542672896, 140012544888831,
+SNULL, 140012542672896, 140012542771199,
+STORE, 140012542771200, 140012544888831,
+STORE, 140012542672896, 140012542771199,
+SNULL, 140012544864255, 140012544888831,
+STORE, 140012542771200, 140012544864255,
+STORE, 140012544864256, 140012544888831,
+SNULL, 140012544864256, 140012544872447,
+STORE, 140012544872448, 140012544888831,
+STORE, 140012544864256, 140012544872447,
+ERASE, 140012544864256, 140012544872447,
+STORE, 140012544864256, 140012544872447,
+ERASE, 140012544872448, 140012544888831,
+STORE, 140012544872448, 140012544888831,
+STORE, 140012538875904, 140012542672895,
+SNULL, 140012538875904, 140012540534783,
+STORE, 140012540534784, 140012542672895,
+STORE, 140012538875904, 140012540534783,
+SNULL, 140012542631935, 140012542672895,
+STORE, 140012540534784, 140012542631935,
+STORE, 140012542631936, 140012542672895,
+SNULL, 140012542631936, 140012542656511,
+STORE, 140012542656512, 140012542672895,
+STORE, 140012542631936, 140012542656511,
+ERASE, 140012542631936, 140012542656511,
+STORE, 140012542631936, 140012542656511,
+ERASE, 140012542656512, 140012542672895,
+STORE, 140012542656512, 140012542672895,
+STORE, 140012547084288, 140012547100671,
+SNULL, 140012542648319, 140012542656511,
+STORE, 140012542631936, 140012542648319,
+STORE, 140012542648320, 140012542656511,
+SNULL, 140012544868351, 140012544872447,
+STORE, 140012544864256, 140012544868351,
+STORE, 140012544868352, 140012544872447,
+SNULL, 94632926715903, 94632926719999,
+STORE, 94632926711808, 94632926715903,
+STORE, 94632926715904, 94632926719999,
+SNULL, 140012547133439, 140012547137535,
+STORE, 140012547129344, 140012547133439,
+STORE, 140012547133440, 140012547137535,
+ERASE, 140012547100672, 140012547129343,
+STORE, 94632939606016, 94632939741183,
+STORE, 140012530483200, 140012538875903,
+SNULL, 140012530487295, 140012538875903,
+STORE, 140012530483200, 140012530487295,
+STORE, 140012530487296, 140012538875903,
+STORE, 140012522090496, 140012530483199,
+STORE, 140012387872768, 140012522090495,
+SNULL, 140012387872768, 140012444188671,
+STORE, 140012444188672, 140012522090495,
+STORE, 140012387872768, 140012444188671,
+ERASE, 140012387872768, 140012444188671,
+SNULL, 140012511297535, 140012522090495,
+STORE, 140012444188672, 140012511297535,
+STORE, 140012511297536, 140012522090495,
+ERASE, 140012511297536, 140012522090495,
+SNULL, 140012444323839, 140012511297535,
+STORE, 140012444188672, 140012444323839,
+STORE, 140012444323840, 140012511297535,
+SNULL, 140012522094591, 140012530483199,
+STORE, 140012522090496, 140012522094591,
+STORE, 140012522094592, 140012530483199,
+STORE, 140012513697792, 140012522090495,
+SNULL, 140012513701887, 140012522090495,
+STORE, 140012513697792, 140012513701887,
+STORE, 140012513701888, 140012522090495,
+STORE, 140012435795968, 140012444188671,
+SNULL, 140012435800063, 140012444188671,
+STORE, 140012435795968, 140012435800063,
+STORE, 140012435800064, 140012444188671,
+STORE, 140012427403264, 140012435795967,
+SNULL, 140012427407359, 140012435795967,
+STORE, 140012427403264, 140012427407359,
+STORE, 140012427407360, 140012435795967,
+STORE, 140012419010560, 140012427403263,
+STORE, 140012410617856, 140012427403263,
+STORE, 140012276400128, 140012410617855,
+STORE, 140012268007424, 140012276400127,
+STORE, 140012133789696, 140012268007423,
+SNULL, 140012133789696, 140012175753215,
+STORE, 140012175753216, 140012268007423,
+STORE, 140012133789696, 140012175753215,
+ERASE, 140012133789696, 140012175753215,
+STORE, 140012041535488, 140012268007423,
+SNULL, 140012108644351, 140012268007423,
+STORE, 140012041535488, 140012108644351,
+STORE, 140012108644352, 140012268007423,
+SNULL, 140012108644352, 140012175753215,
+STORE, 140012175753216, 140012268007423,
+STORE, 140012108644352, 140012175753215,
+ERASE, 140012108644352, 140012175753215,
+SNULL, 140012276400128, 140012309970943,
+STORE, 140012309970944, 140012410617855,
+STORE, 140012276400128, 140012309970943,
+ERASE, 140012276400128, 140012309970943,
+STORE, 140012301578240, 140012309970943,
+STORE, 140012041535488, 140012268007423,
+SNULL, 140012242862079, 140012268007423,
+STORE, 140012041535488, 140012242862079,
+STORE, 140012242862080, 140012268007423,
+ERASE, 140012242862080, 140012268007423,
+SNULL, 140012041670655, 140012242862079,
+STORE, 140012041535488, 140012041670655,
+STORE, 140012041670656, 140012242862079,
+SNULL, 140012041670656, 140012108644351,
+STORE, 140012108644352, 140012242862079,
+STORE, 140012041670656, 140012108644351,
+SNULL, 140012108779519, 140012242862079,
+STORE, 140012108644352, 140012108779519,
+STORE, 140012108779520, 140012242862079,
+SNULL, 140012377079807, 140012410617855,
+STORE, 140012309970944, 140012377079807,
+STORE, 140012377079808, 140012410617855,
+ERASE, 140012377079808, 140012410617855,
+SNULL, 140012310106111, 140012377079807,
+STORE, 140012309970944, 140012310106111,
+STORE, 140012310106112, 140012377079807,
+SNULL, 140012410621951, 140012427403263,
+STORE, 140012410617856, 140012410621951,
+STORE, 140012410621952, 140012427403263,
+SNULL, 140012108779520, 140012175753215,
+STORE, 140012175753216, 140012242862079,
+STORE, 140012108779520, 140012175753215,
+SNULL, 140012175888383, 140012242862079,
+STORE, 140012175753216, 140012175888383,
+STORE, 140012175888384, 140012242862079,
+SNULL, 140012301582335, 140012309970943,
+STORE, 140012301578240, 140012301582335,
+STORE, 140012301582336, 140012309970943,
+SNULL, 140012410621952, 140012419010559,
+STORE, 140012419010560, 140012427403263,
+STORE, 140012410621952, 140012419010559,
+SNULL, 140012419014655, 140012427403263,
+STORE, 140012419010560, 140012419014655,
+STORE, 140012419014656, 140012427403263,
+SNULL, 140012268011519, 140012276400127,
+STORE, 140012268007424, 140012268011519,
+STORE, 140012268011520, 140012276400127,
+STORE, 140012402225152, 140012410617855,
+STORE, 140012393832448, 140012410617855,
+SNULL, 140012393832448, 140012402225151,
+STORE, 140012402225152, 140012410617855,
+STORE, 140012393832448, 140012402225151,
+SNULL, 140012402229247, 140012410617855,
+STORE, 140012402225152, 140012402229247,
+STORE, 140012402229248, 140012410617855,
+STORE, 140012385439744, 140012402225151,
+SNULL, 140012385439744, 140012393832447,
+STORE, 140012393832448, 140012402225151,
+STORE, 140012385439744, 140012393832447,
+SNULL, 140012393836543, 140012402225151,
+STORE, 140012393832448, 140012393836543,
+STORE, 140012393836544, 140012402225151,
+STORE, 140012293185536, 140012301578239,
+STORE, 140012284792832, 140012301578239,
+SNULL, 140012284792832, 140012293185535,
+STORE, 140012293185536, 140012301578239,
+STORE, 140012284792832, 140012293185535,
+SNULL, 140012293189631, 140012301578239,
+STORE, 140012293185536, 140012293189631,
+STORE, 140012293189632, 140012301578239,
+STORE, 140012268011520, 140012284792831,
+SNULL, 140012385443839, 140012393832447,
+STORE, 140012385439744, 140012385443839,
+STORE, 140012385443840, 140012393832447,
+STORE, 140012259614720, 140012268007423,
+SNULL, 140012259618815, 140012268007423,
+STORE, 140012259614720, 140012259618815,
+STORE, 140012259618816, 140012268007423,
+STORE, 140012251222016, 140012259614719,
+SNULL, 140012251226111, 140012259614719,
+STORE, 140012251222016, 140012251226111,
+STORE, 140012251226112, 140012259614719,
+SNULL, 140012284796927, 140012293185535,
+STORE, 140012284792832, 140012284796927,
+STORE, 140012284796928, 140012293185535,
+SNULL, 140012268011520, 140012276400127,
+STORE, 140012276400128, 140012284792831,
+STORE, 140012268011520, 140012276400127,
+SNULL, 140012276404223, 140012284792831,
+STORE, 140012276400128, 140012276404223,
+STORE, 140012276404224, 140012284792831,
+STORE, 140012033142784, 140012041535487,
+SNULL, 140012033146879, 140012041535487,
+STORE, 140012033142784, 140012033146879,
+STORE, 140012033146880, 140012041535487,
+STORE, 140012024750080, 140012033142783,
+STORE, 140012016357376, 140012033142783,
+SNULL, 140012016357376, 140012024750079,
+STORE, 140012024750080, 140012033142783,
+STORE, 140012016357376, 140012024750079,
+SNULL, 140012024754175, 140012033142783,
+STORE, 140012024750080, 140012024754175,
+STORE, 140012024754176, 140012033142783,
+SNULL, 140012016361471, 140012024750079,
+STORE, 140012016357376, 140012016361471,
+STORE, 140012016361472, 140012024750079,
+STORE, 140012007964672, 140012016357375,
+SNULL, 140012007968767, 140012016357375,
+STORE, 140012007964672, 140012007968767,
+STORE, 140012007968768, 140012016357375,
+STORE, 140011999571968, 140012007964671,
+STORE, 140011991179264, 140012007964671,
+STORE, 140011856961536, 140011991179263,
+STORE, 140011848568832, 140011856961535,
+STORE, 140011714351104, 140011848568831,
+SNULL, 140011714351104, 140011773100031,
+STORE, 140011773100032, 140011848568831,
+STORE, 140011714351104, 140011773100031,
+ERASE, 140011714351104, 140011773100031,
+STORE, 140011764707328, 140011773100031,
+STORE, 140011756314624, 140011773100031,
+STORE, 140011622096896, 140011756314623,
+STORE, 140011613704192, 140011622096895,
+STORE, 140011479486464, 140011613704191,
+STORE, 140011471093760, 140011479486463,
+SNULL, 140011479486464, 140011504664575,
+STORE, 140011504664576, 140011613704191,
+STORE, 140011479486464, 140011504664575,
+ERASE, 140011479486464, 140011504664575,
+STORE, 140011496271872, 140011504664575,
+STORE, 140011487879168, 140011504664575,
+STORE, 140011336876032, 140011471093759,
+SNULL, 140011336876032, 140011370446847,
+STORE, 140011370446848, 140011471093759,
+STORE, 140011336876032, 140011370446847,
+ERASE, 140011336876032, 140011370446847,
+STORE, 140011471093760, 140011487879167,
+STORE, 140011362054144, 140011370446847,
+SNULL, 140011362058239, 140011370446847,
+STORE, 140011362054144, 140011362058239,
+STORE, 140011362058240, 140011370446847,
+STORE, 140011353661440, 140011362054143,
+STORE, 140011345268736, 140011362054143,
+SNULL, 140011345272831, 140011362054143,
+STORE, 140011345268736, 140011345272831,
+STORE, 140011345272832, 140011362054143,
+STORE, 140011336876032, 140011345268735,
+STORE, 140011328483328, 140011345268735,
+SNULL, 140011328487423, 140011345268735,
+STORE, 140011328483328, 140011328487423,
+STORE, 140011328487424, 140011345268735,
+STORE, 140011320090624, 140011328483327,
+STORE, 140011185872896, 140011320090623,
+SNULL, 140011185872896, 140011236229119,
+STORE, 140011236229120, 140011320090623,
+STORE, 140011185872896, 140011236229119,
+ERASE, 140011185872896, 140011236229119,
+SNULL, 140011856961536, 140011907317759,
+STORE, 140011907317760, 140011991179263,
+STORE, 140011856961536, 140011907317759,
+ERASE, 140011856961536, 140011907317759,
+SNULL, 140011974426623, 140011991179263,
+STORE, 140011907317760, 140011974426623,
+STORE, 140011974426624, 140011991179263,
+ERASE, 140011974426624, 140011991179263,
+SNULL, 140011840208895, 140011848568831,
+STORE, 140011773100032, 140011840208895,
+STORE, 140011840208896, 140011848568831,
+ERASE, 140011840208896, 140011848568831,
+SNULL, 140011773235199, 140011840208895,
+STORE, 140011773100032, 140011773235199,
+STORE, 140011773235200, 140011840208895,
+STORE, 140011102011392, 140011320090623,
+SNULL, 140011169120255, 140011320090623,
+STORE, 140011102011392, 140011169120255,
+STORE, 140011169120256, 140011320090623,
+SNULL, 140011169120256, 140011236229119,
+STORE, 140011236229120, 140011320090623,
+STORE, 140011169120256, 140011236229119,
+ERASE, 140011169120256, 140011236229119,
+SNULL, 140011622096896, 140011638882303,
+STORE, 140011638882304, 140011756314623,
+STORE, 140011622096896, 140011638882303,
+ERASE, 140011622096896, 140011638882303,
+SNULL, 140011705991167, 140011756314623,
+STORE, 140011638882304, 140011705991167,
+STORE, 140011705991168, 140011756314623,
+ERASE, 140011705991168, 140011756314623,
+SNULL, 140011571773439, 140011613704191,
+STORE, 140011504664576, 140011571773439,
+STORE, 140011571773440, 140011613704191,
+ERASE, 140011571773440, 140011613704191,
+STORE, 140010967793664, 140011169120255,
+SNULL, 140011034902527, 140011169120255,
+STORE, 140010967793664, 140011034902527,
+STORE, 140011034902528, 140011169120255,
+SNULL, 140011034902528, 140011102011391,
+STORE, 140011102011392, 140011169120255,
+STORE, 140011034902528, 140011102011391,
+ERASE, 140011034902528, 140011102011391,
+STORE, 140010833575936, 140011034902527,
+SNULL, 140011437555711, 140011471093759,
+STORE, 140011370446848, 140011437555711,
+STORE, 140011437555712, 140011471093759,
+ERASE, 140011437555712, 140011471093759,
+SNULL, 140011370582015, 140011437555711,
+STORE, 140011370446848, 140011370582015,
+STORE, 140011370582016, 140011437555711,
+STORE, 140010699358208, 140011034902527,
+SNULL, 140011487883263, 140011504664575,
+STORE, 140011487879168, 140011487883263,
+STORE, 140011487883264, 140011504664575,
+SNULL, 140011345272832, 140011353661439,
+STORE, 140011353661440, 140011362054143,
+STORE, 140011345272832, 140011353661439,
+SNULL, 140011353665535, 140011362054143,
+STORE, 140011353661440, 140011353665535,
+STORE, 140011353665536, 140011362054143,
+SNULL, 140011328487424, 140011336876031,
+STORE, 140011336876032, 140011345268735,
+STORE, 140011328487424, 140011336876031,
+SNULL, 140011336880127, 140011345268735,
+STORE, 140011336876032, 140011336880127,
+STORE, 140011336880128, 140011345268735,
+SNULL, 140011303337983, 140011320090623,
+STORE, 140011236229120, 140011303337983,
+STORE, 140011303337984, 140011320090623,
+ERASE, 140011303337984, 140011320090623,
+SNULL, 140011907452927, 140011974426623,
+STORE, 140011907317760, 140011907452927,
+STORE, 140011907452928, 140011974426623,
+SNULL, 140011102146559, 140011169120255,
+STORE, 140011102011392, 140011102146559,
+STORE, 140011102146560, 140011169120255,
+SNULL, 140011639017471, 140011705991167,
+STORE, 140011638882304, 140011639017471,
+STORE, 140011639017472, 140011705991167,
+SNULL, 140011504799743, 140011571773439,
+STORE, 140011504664576, 140011504799743,
+STORE, 140011504799744, 140011571773439,
+SNULL, 140011613708287, 140011622096895,
+STORE, 140011613704192, 140011613708287,
+STORE, 140011613708288, 140011622096895,
+SNULL, 140010699358208, 140010967793663,
+STORE, 140010967793664, 140011034902527,
+STORE, 140010699358208, 140010967793663,
+SNULL, 140010967928831, 140011034902527,
+STORE, 140010967793664, 140010967928831,
+STORE, 140010967928832, 140011034902527,
+SNULL, 140010900684799, 140010967793663,
+STORE, 140010699358208, 140010900684799,
+STORE, 140010900684800, 140010967793663,
+ERASE, 140010900684800, 140010967793663,
+SNULL, 140010766467071, 140010900684799,
+STORE, 140010699358208, 140010766467071,
+STORE, 140010766467072, 140010900684799,
+SNULL, 140010766467072, 140010833575935,
+STORE, 140010833575936, 140010900684799,
+STORE, 140010766467072, 140010833575935,
+ERASE, 140010766467072, 140010833575935,
+SNULL, 140010699493375, 140010766467071,
+STORE, 140010699358208, 140010699493375,
+STORE, 140010699493376, 140010766467071,
+SNULL, 140011848572927, 140011856961535,
+STORE, 140011848568832, 140011848572927,
+STORE, 140011848572928, 140011856961535,
+STORE, 140011982786560, 140012007964671,
+STORE, 140011898925056, 140011907317759,
+SNULL, 140011898929151, 140011907317759,
+STORE, 140011898925056, 140011898929151,
+STORE, 140011898929152, 140011907317759,
+SNULL, 140011320094719, 140011328483327,
+STORE, 140011320090624, 140011320094719,
+STORE, 140011320094720, 140011328483327,
+STORE, 140011890532352, 140011898925055,
+STORE, 140011882139648, 140011898925055,
+SNULL, 140011882143743, 140011898925055,
+STORE, 140011882139648, 140011882143743,
+STORE, 140011882143744, 140011898925055,
+STORE, 140011873746944, 140011882139647,
+SNULL, 140011873751039, 140011882139647,
+STORE, 140011873746944, 140011873751039,
+STORE, 140011873751040, 140011882139647,
+SNULL, 140011236364287, 140011303337983,
+STORE, 140011236229120, 140011236364287,
+STORE, 140011236364288, 140011303337983,
+SNULL, 140011756318719, 140011773100031,
+STORE, 140011756314624, 140011756318719,
+STORE, 140011756318720, 140011773100031,
+SNULL, 140011756318720, 140011764707327,
+STORE, 140011764707328, 140011773100031,
+STORE, 140011756318720, 140011764707327,
+SNULL, 140011764711423, 140011773100031,
+STORE, 140011764707328, 140011764711423,
+STORE, 140011764711424, 140011773100031,
+SNULL, 140011471097855, 140011487879167,
+STORE, 140011471093760, 140011471097855,
+STORE, 140011471097856, 140011487879167,
+SNULL, 140010833711103, 140010900684799,
+STORE, 140010833575936, 140010833711103,
+STORE, 140010833711104, 140010900684799,
+SNULL, 140011982790655, 140012007964671,
+STORE, 140011982786560, 140011982790655,
+STORE, 140011982790656, 140012007964671,
+STORE, 140011865354240, 140011873746943,
+STORE, 140011848572928, 140011865354239,
+SNULL, 140011848572928, 140011856961535,
+STORE, 140011856961536, 140011865354239,
+STORE, 140011848572928, 140011856961535,
+SNULL, 140011856965631, 140011865354239,
+STORE, 140011856961536, 140011856965631,
+STORE, 140011856965632, 140011865354239,
+STORE, 140011747921920, 140011756314623,
+STORE, 140011739529216, 140011756314623,
+SNULL, 140011471097856, 140011479486463,
+STORE, 140011479486464, 140011487879167,
+STORE, 140011471097856, 140011479486463,
+SNULL, 140011479490559, 140011487879167,
+STORE, 140011479486464, 140011479490559,
+STORE, 140011479490560, 140011487879167,
+STORE, 140011731136512, 140011756314623,
+STORE, 140011722743808, 140011756314623,
+SNULL, 140011982790656, 140011999571967,
+STORE, 140011999571968, 140012007964671,
+STORE, 140011982790656, 140011999571967,
+SNULL, 140011999576063, 140012007964671,
+STORE, 140011999571968, 140011999576063,
+STORE, 140011999576064, 140012007964671,
+STORE, 140011714351104, 140011756314623,
+SNULL, 140011882143744, 140011890532351,
+STORE, 140011890532352, 140011898925055,
+STORE, 140011882143744, 140011890532351,
+SNULL, 140011890536447, 140011898925055,
+STORE, 140011890532352, 140011890536447,
+STORE, 140011890536448, 140011898925055,
+STORE, 140011630489600, 140011638882303,
+STORE, 140011613708288, 140011638882303,
+STORE, 140011605311488, 140011613704191,
+STORE, 140011596918784, 140011613704191,
+STORE, 140011588526080, 140011613704191,
+SNULL, 140011487883264, 140011496271871,
+STORE, 140011496271872, 140011504664575,
+STORE, 140011487883264, 140011496271871,
+SNULL, 140011496275967, 140011504664575,
+STORE, 140011496271872, 140011496275967,
+STORE, 140011496275968, 140011504664575,
+STORE, 140011580133376, 140011613704191,
+SNULL, 140011580137471, 140011613704191,
+STORE, 140011580133376, 140011580137471,
+STORE, 140011580137472, 140011613704191,
+SNULL, 140011982790656, 140011991179263,
+STORE, 140011991179264, 140011999571967,
+STORE, 140011982790656, 140011991179263,
+SNULL, 140011991183359, 140011999571967,
+STORE, 140011991179264, 140011991183359,
+STORE, 140011991183360, 140011999571967,
+SNULL, 140011865358335, 140011873746943,
+STORE, 140011865354240, 140011865358335,
+STORE, 140011865358336, 140011873746943,
+STORE, 140011462701056, 140011471093759,
+SNULL, 140011714351104, 140011739529215,
+STORE, 140011739529216, 140011756314623,
+STORE, 140011714351104, 140011739529215,
+SNULL, 140011739533311, 140011756314623,
+STORE, 140011739529216, 140011739533311,
+STORE, 140011739533312, 140011756314623,
+SNULL, 140011739533312, 140011747921919,
+STORE, 140011747921920, 140011756314623,
+STORE, 140011739533312, 140011747921919,
+SNULL, 140011747926015, 140011756314623,
+STORE, 140011747921920, 140011747926015,
+STORE, 140011747926016, 140011756314623,
+SNULL, 140011613708288, 140011630489599,
+STORE, 140011630489600, 140011638882303,
+STORE, 140011613708288, 140011630489599,
+SNULL, 140011630493695, 140011638882303,
+STORE, 140011630489600, 140011630493695,
+STORE, 140011630493696, 140011638882303,
+SNULL, 140011714351104, 140011722743807,
+STORE, 140011722743808, 140011739529215,
+STORE, 140011714351104, 140011722743807,
+SNULL, 140011722747903, 140011739529215,
+STORE, 140011722743808, 140011722747903,
+STORE, 140011722747904, 140011739529215,
+SNULL, 140011714355199, 140011722743807,
+STORE, 140011714351104, 140011714355199,
+STORE, 140011714355200, 140011722743807,
+SNULL, 140011722747904, 140011731136511,
+STORE, 140011731136512, 140011739529215,
+STORE, 140011722747904, 140011731136511,
+SNULL, 140011731140607, 140011739529215,
+STORE, 140011731136512, 140011731140607,
+STORE, 140011731140608, 140011739529215,
+STORE, 140011454308352, 140011471093759,
+STORE, 140011445915648, 140011471093759,
+SNULL, 140011580137472, 140011588526079,
+STORE, 140011588526080, 140011613704191,
+STORE, 140011580137472, 140011588526079,
+SNULL, 140011588530175, 140011613704191,
+STORE, 140011588526080, 140011588530175,
+STORE, 140011588530176, 140011613704191,
+SNULL, 140011445915648, 140011462701055,
+STORE, 140011462701056, 140011471093759,
+STORE, 140011445915648, 140011462701055,
+SNULL, 140011462705151, 140011471093759,
+STORE, 140011462701056, 140011462705151,
+STORE, 140011462705152, 140011471093759,
+SNULL, 140011588530176, 140011596918783,
+STORE, 140011596918784, 140011613704191,
+STORE, 140011588530176, 140011596918783,
+SNULL, 140011596922879, 140011613704191,
+STORE, 140011596918784, 140011596922879,
+STORE, 140011596922880, 140011613704191,
+SNULL, 140011596922880, 140011605311487,
+STORE, 140011605311488, 140011613704191,
+STORE, 140011596922880, 140011605311487,
+SNULL, 140011605315583, 140011613704191,
+STORE, 140011605311488, 140011605315583,
+STORE, 140011605315584, 140011613704191,
+SNULL, 140011613708288, 140011622096895,
+STORE, 140011622096896, 140011630489599,
+STORE, 140011613708288, 140011622096895,
+SNULL, 140011622100991, 140011630489599,
+STORE, 140011622096896, 140011622100991,
+STORE, 140011622100992, 140011630489599,
+STORE, 140011311697920, 140011320090623,
+STORE, 140011227836416, 140011236229119,
+STORE, 140011219443712, 140011236229119,
+SNULL, 140011219447807, 140011236229119,
+STORE, 140011219443712, 140011219447807,
+STORE, 140011219447808, 140011236229119,
+STORE, 140011211051008, 140011219443711,
+STORE, 140011202658304, 140011219443711,
+SNULL, 140011202662399, 140011219443711,
+STORE, 140011202658304, 140011202662399,
+STORE, 140011202662400, 140011219443711,
+STORE, 140011194265600, 140011202658303,
+STORE, 140011185872896, 140011202658303,
+STORE, 140011177480192, 140011202658303,
+STORE, 140011093618688, 140011102011391,
+SNULL, 140011445915648, 140011454308351,
+STORE, 140011454308352, 140011462701055,
+STORE, 140011445915648, 140011454308351,
+SNULL, 140011454312447, 140011462701055,
+STORE, 140011454308352, 140011454312447,
+STORE, 140011454312448, 140011462701055,
+STORE, 140011085225984, 140011102011391,
+SNULL, 140011085230079, 140011102011391,
+STORE, 140011085225984, 140011085230079,
+STORE, 140011085230080, 140011102011391,
+SNULL, 140011177484287, 140011202658303,
+STORE, 140011177480192, 140011177484287,
+STORE, 140011177484288, 140011202658303,
+SNULL, 140011445919743, 140011454308351,
+STORE, 140011445915648, 140011445919743,
+STORE, 140011445919744, 140011454308351,
+SNULL, 140011177484288, 140011185872895,
+STORE, 140011185872896, 140011202658303,
+STORE, 140011177484288, 140011185872895,
+SNULL, 140011185876991, 140011202658303,
+STORE, 140011185872896, 140011185876991,
+STORE, 140011185876992, 140011202658303,
+STORE, 140011076833280, 140011085225983,
+SNULL, 140011202662400, 140011211051007,
+STORE, 140011211051008, 140011219443711,
+STORE, 140011202662400, 140011211051007,
+SNULL, 140011211055103, 140011219443711,
+STORE, 140011211051008, 140011211055103,
+STORE, 140011211055104, 140011219443711,
+SNULL, 140011185876992, 140011194265599,
+STORE, 140011194265600, 140011202658303,
+STORE, 140011185876992, 140011194265599,
+SNULL, 140011194269695, 140011202658303,
+STORE, 140011194265600, 140011194269695,
+STORE, 140011194269696, 140011202658303,
+STORE, 140011068440576, 140011085225983,
+SNULL, 140011311702015, 140011320090623,
+STORE, 140011311697920, 140011311702015,
+STORE, 140011311702016, 140011320090623,
+STORE, 140011060047872, 140011085225983,
+SNULL, 140011060051967, 140011085225983,
+STORE, 140011060047872, 140011060051967,
+STORE, 140011060051968, 140011085225983,
+STORE, 140011051655168, 140011060047871,
+STORE, 140011043262464, 140011060047871,
+SNULL, 140011043266559, 140011060047871,
+STORE, 140011043262464, 140011043266559,
+STORE, 140011043266560, 140011060047871,
+SNULL, 140011219447808, 140011227836415,
+STORE, 140011227836416, 140011236229119,
+STORE, 140011219447808, 140011227836415,
+SNULL, 140011227840511, 140011236229119,
+STORE, 140011227836416, 140011227840511,
+STORE, 140011227840512, 140011236229119,
+SNULL, 140011085230080, 140011093618687,
+STORE, 140011093618688, 140011102011391,
+STORE, 140011085230080, 140011093618687,
+SNULL, 140011093622783, 140011102011391,
+STORE, 140011093618688, 140011093622783,
+STORE, 140011093622784, 140011102011391,
+STORE, 140010959400960, 140010967793663,
+STORE, 140010951008256, 140010967793663,
+SNULL, 140010951008256, 140010959400959,
+STORE, 140010959400960, 140010967793663,
+STORE, 140010951008256, 140010959400959,
+SNULL, 140010959405055, 140010967793663,
+STORE, 140010959400960, 140010959405055,
+STORE, 140010959405056, 140010967793663,
+STORE, 140010942615552, 140010959400959,
+STORE, 140010934222848, 140010959400959,
+SNULL, 140011060051968, 140011076833279,
+STORE, 140011076833280, 140011085225983,
+STORE, 140011060051968, 140011076833279,
+SNULL, 140011076837375, 140011085225983,
+STORE, 140011076833280, 140011076837375,
+STORE, 140011076837376, 140011085225983,
+SNULL, 140011043266560, 140011051655167,
+STORE, 140011051655168, 140011060047871,
+STORE, 140011043266560, 140011051655167,
+SNULL, 140011051659263, 140011060047871,
+STORE, 140011051655168, 140011051659263,
+STORE, 140011051659264, 140011060047871,
+STORE, 140010925830144, 140010959400959,
+SNULL, 140011060051968, 140011068440575,
+STORE, 140011068440576, 140011076833279,
+STORE, 140011060051968, 140011068440575,
+SNULL, 140011068444671, 140011076833279,
+STORE, 140011068440576, 140011068444671,
+STORE, 140011068444672, 140011076833279,
+STORE, 140010917437440, 140010959400959,
+STORE, 140010909044736, 140010959400959,
+STORE, 140010825183232, 140010833575935,
+SNULL, 140010909044736, 140010942615551,
+STORE, 140010942615552, 140010959400959,
+STORE, 140010909044736, 140010942615551,
+SNULL, 140010942619647, 140010959400959,
+STORE, 140010942615552, 140010942619647,
+STORE, 140010942619648, 140010959400959,
+SNULL, 140010909044736, 140010934222847,
+STORE, 140010934222848, 140010942615551,
+STORE, 140010909044736, 140010934222847,
+SNULL, 140010934226943, 140010942615551,
+STORE, 140010934222848, 140010934226943,
+STORE, 140010934226944, 140010942615551,
+SNULL, 140010909048831, 140010934222847,
+STORE, 140010909044736, 140010909048831,
+STORE, 140010909048832, 140010934222847,
+STORE, 140010816790528, 140010833575935,
+SNULL, 140010816794623, 140010833575935,
+STORE, 140010816790528, 140010816794623,
+STORE, 140010816794624, 140010833575935,
+STORE, 140010808397824, 140010816790527,
+SNULL, 140010942619648, 140010951008255,
+STORE, 140010951008256, 140010959400959,
+STORE, 140010942619648, 140010951008255,
+SNULL, 140010951012351, 140010959400959,
+STORE, 140010951008256, 140010951012351,
+STORE, 140010951012352, 140010959400959,
+STORE, 140010800005120, 140010816790527,
+SNULL, 140010800009215, 140010816790527,
+STORE, 140010800005120, 140010800009215,
+STORE, 140010800009216, 140010816790527,
+SNULL, 140010909048832, 140010925830143,
+STORE, 140010925830144, 140010934222847,
+STORE, 140010909048832, 140010925830143,
+SNULL, 140010925834239, 140010934222847,
+STORE, 140010925830144, 140010925834239,
+STORE, 140010925834240, 140010934222847,
+SNULL, 140010816794624, 140010825183231,
+STORE, 140010825183232, 140010833575935,
+STORE, 140010816794624, 140010825183231,
+SNULL, 140010825187327, 140010833575935,
+STORE, 140010825183232, 140010825187327,
+STORE, 140010825187328, 140010833575935,
+SNULL, 140010909048832, 140010917437439,
+STORE, 140010917437440, 140010925830143,
+STORE, 140010909048832, 140010917437439,
+SNULL, 140010917441535, 140010925830143,
+STORE, 140010917437440, 140010917441535,
+STORE, 140010917441536, 140010925830143,
+SNULL, 140010800009216, 140010808397823,
+STORE, 140010808397824, 140010816790527,
+STORE, 140010800009216, 140010808397823,
+SNULL, 140010808401919, 140010816790527,
+STORE, 140010808397824, 140010808401919,
+STORE, 140010808401920, 140010816790527,
+STORE, 140010791612416, 140010800005119,
+SNULL, 140010791616511, 140010800005119,
+STORE, 140010791612416, 140010791616511,
+STORE, 140010791616512, 140010800005119,
+STORE, 140012547100672, 140012547129343,
+STORE, 140012511506432, 140012513697791,
+SNULL, 140012511506432, 140012511596543,
+STORE, 140012511596544, 140012513697791,
+STORE, 140012511506432, 140012511596543,
+SNULL, 140012513689599, 140012513697791,
+STORE, 140012511596544, 140012513689599,
+STORE, 140012513689600, 140012513697791,
+ERASE, 140012513689600, 140012513697791,
+STORE, 140012513689600, 140012513697791,
+SNULL, 140012513693695, 140012513697791,
+STORE, 140012513689600, 140012513693695,
+STORE, 140012513693696, 140012513697791,
+ERASE, 140012547100672, 140012547129343,
+ERASE, 140011362054144, 140011362058239,
+ERASE, 140011362058240, 140011370446847,
+ERASE, 140011882139648, 140011882143743,
+ERASE, 140011882143744, 140011890532351,
+ERASE, 140011873746944, 140011873751039,
+ERASE, 140011873751040, 140011882139647,
+ERASE, 140011588526080, 140011588530175,
+ERASE, 140011588530176, 140011596918783,
+ERASE, 140011328483328, 140011328487423,
+ERASE, 140011328487424, 140011336876031,
+ERASE, 140011898925056, 140011898929151,
+ERASE, 140011898929152, 140011907317759,
+ERASE, 140011353661440, 140011353665535,
+ERASE, 140011353665536, 140011362054143,
+ERASE, 140011336876032, 140011336880127,
+ERASE, 140011336880128, 140011345268735,
+ERASE, 140011731136512, 140011731140607,
+ERASE, 140011731140608, 140011739529215,
+ERASE, 140011479486464, 140011479490559,
+ERASE, 140011479490560, 140011487879167,
+ERASE, 140011756314624, 140011756318719,
+ERASE, 140011756318720, 140011764707327,
+ERASE, 140011580133376, 140011580137471,
+ERASE, 140011580137472, 140011588526079,
+ERASE, 140011219443712, 140011219447807,
+ERASE, 140011219447808, 140011227836415,
+ERASE, 140011051655168, 140011051659263,
+ERASE, 140011051659264, 140011060047871,
+ERASE, 140011999571968, 140011999576063,
+ERASE, 140011999576064, 140012007964671,
+ERASE, 140011714351104, 140011714355199,
+ERASE, 140011714355200, 140011722743807,
+ERASE, 140011739529216, 140011739533311,
+ERASE, 140011739533312, 140011747921919,
+ERASE, 140011320090624, 140011320094719,
+ERASE, 140011320094720, 140011328483327,
+ERASE, 140011630489600, 140011630493695,
+ERASE, 140011630493696, 140011638882303,
+ERASE, 140011345268736, 140011345272831,
+ERASE, 140011345272832, 140011353661439,
+ERASE, 140011496271872, 140011496275967,
+ERASE, 140011496275968, 140011504664575,
+ERASE, 140011194265600, 140011194269695,
+ERASE, 140011194269696, 140011202658303,
+ERASE, 140011068440576, 140011068444671,
+ERASE, 140011068444672, 140011076833279,
+ERASE, 140010909044736, 140010909048831,
+ERASE, 140010909048832, 140010917437439,
+ERASE, 140011764707328, 140011764711423,
+ERASE, 140011764711424, 140011773100031,
+ERASE, 140011462701056, 140011462705151,
+ERASE, 140011462705152, 140011471093759,
+ERASE, 140011076833280, 140011076837375,
+ERASE, 140011076837376, 140011085225983,
+ERASE, 140011991179264, 140011991183359,
+ERASE, 140011991183360, 140011999571967,
+ERASE, 140011211051008, 140011211055103,
+ERASE, 140011211055104, 140011219443711,
+ERASE, 140010917437440, 140010917441535,
+ERASE, 140010917441536, 140010925830143,
+ERASE, 140011085225984, 140011085230079,
+ERASE, 140011085230080, 140011093618687,
+ERASE, 140011487879168, 140011487883263,
+ERASE, 140011487883264, 140011496271871,
+ERASE, 140011856961536, 140011856965631,
+ERASE, 140011856965632, 140011865354239,
+ERASE, 140011982786560, 140011982790655,
+ERASE, 140011982790656, 140011991179263,
+ERASE, 140011722743808, 140011722747903,
+ERASE, 140011722747904, 140011731136511,
+ERASE, 140011177480192, 140011177484287,
+ERASE, 140011177484288, 140011185872895,
+ERASE, 140011848568832, 140011848572927,
+ERASE, 140011848572928, 140011856961535,
+ERASE, 140011890532352, 140011890536447,
+ERASE, 140011890536448, 140011898925055,
+ERASE, 140011622096896, 140011622100991,
+ERASE, 140011622100992, 140011630489599,
+ERASE, 140011311697920, 140011311702015,
+ERASE, 140011311702016, 140011320090623,
+ERASE, 140011471093760, 140011471097855,
+ERASE, 140011471097856, 140011479486463,
+ERASE, 140011605311488, 140011605315583,
+ERASE, 140011605315584, 140011613704191,
+ERASE, 140010791612416, 140010791616511,
+ERASE, 140010791616512, 140010800005119,
+ERASE, 140010959400960, 140010959405055,
+ERASE, 140010959405056, 140010967793663,
+ERASE, 140011185872896, 140011185876991,
+ERASE, 140011185876992, 140011194265599,
+ERASE, 140011454308352, 140011454312447,
+ERASE, 140011454312448, 140011462701055,
+ERASE, 140011596918784, 140011596922879,
+ERASE, 140011596922880, 140011605311487,
+ERASE, 140011060047872, 140011060051967,
+ERASE, 140011060051968, 140011068440575,
+ERASE, 140010925830144, 140010925834239,
+ERASE, 140010925834240, 140010934222847,
+ERASE, 140011747921920, 140011747926015,
+ERASE, 140011747926016, 140011756314623,
+ERASE, 140011202658304, 140011202662399,
+ERASE, 140011202662400, 140011211051007,
+ERASE, 140010800005120, 140010800009215,
+ERASE, 140010800009216, 140010808397823,
+ERASE, 140011093618688, 140011093622783,
+ERASE, 140011093622784, 140011102011391,
+ERASE, 140010808397824, 140010808401919,
+ERASE, 140010808401920, 140010816790527,
+ERASE, 140012419010560, 140012419014655,
+ERASE, 140012419014656, 140012427403263,
+ERASE, 140010934222848, 140010934226943,
+ERASE, 140010934226944, 140010942615551,
+ERASE, 140010942615552, 140010942619647,
+ERASE, 140010942619648, 140010951008255,
+ERASE, 140011613704192, 140011613708287,
+ERASE, 140011613708288, 140011622096895,
+ERASE, 140011865354240, 140011865358335,
+ERASE, 140011865358336, 140011873746943,
+ERASE, 140012301578240, 140012301582335,
+ERASE, 140012301582336, 140012309970943,
+ERASE, 140012393832448, 140012393836543,
+ERASE, 140012393836544, 140012402225151,
+ERASE, 140012410617856, 140012410621951,
+ERASE, 140012410621952, 140012419010559,
+ERASE, 140012402225152, 140012402229247,
+ERASE, 140012402229248, 140012410617855,
+ERASE, 140012259614720, 140012259618815,
+ERASE, 140012259618816, 140012268007423,
+ERASE, 140012251222016, 140012251226111,
+ERASE, 140012251226112, 140012259614719,
+ERASE, 140012284792832, 140012284796927,
+ERASE, 140012284796928, 140012293185535,
+ERASE, 140011445915648, 140011445919743,
+ERASE, 140011445919744, 140011454308351,
+ERASE, 140010951008256, 140010951012351,
+ERASE, 140010951012352, 140010959400959,
+ERASE, 140011043262464, 140011043266559,
+ERASE, 140011043266560, 140011051655167,
+ERASE, 140010825183232, 140010825187327,
+ERASE, 140010825187328, 140010833575935,
+ERASE, 140012293185536, 140012293189631,
+ERASE, 140012293189632, 140012301578239,
+ERASE, 140012276400128, 140012276404223,
+ERASE, 140012276404224, 140012284792831,
+ERASE, 140012016357376, 140012016361471,
+ERASE, 140012016361472, 140012024750079,
+ERASE, 140012024750080, 140012024754175,
+ERASE, 140012024754176, 140012033142783,
+ERASE, 140011227836416, 140011227840511,
+ERASE, 140011227840512, 140011236229119,
+ERASE, 140010816790528, 140010816794623,
+ERASE, 140010816794624, 140010825183231,
+ERASE, 140012268007424, 140012268011519,
+ERASE, 140012268011520, 140012276400127,
+ERASE, 140012385439744, 140012385443839,
+ERASE, 140012385443840, 140012393832447,
+ERASE, 140012522090496, 140012522094591,
+ERASE, 140012522094592, 140012530483199,
+ERASE, 140012033142784, 140012033146879,
+ERASE, 140012033146880, 140012041535487,
+ };
+ unsigned long set35[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140730536939520, 140737488351231,
+SNULL, 140730536943615, 140737488351231,
+STORE, 140730536939520, 140730536943615,
+STORE, 140730536808448, 140730536943615,
+STORE, 94245239877632, 94245242130431,
+SNULL, 94245240008703, 94245242130431,
+STORE, 94245239877632, 94245240008703,
+STORE, 94245240008704, 94245242130431,
+ERASE, 94245240008704, 94245242130431,
+STORE, 94245242101760, 94245242109951,
+STORE, 94245242109952, 94245242130431,
+STORE, 140475575263232, 140475577516031,
+SNULL, 140475575406591, 140475577516031,
+STORE, 140475575263232, 140475575406591,
+STORE, 140475575406592, 140475577516031,
+ERASE, 140475575406592, 140475577516031,
+STORE, 140475577503744, 140475577511935,
+STORE, 140475577511936, 140475577516031,
+STORE, 140730538164224, 140730538168319,
+STORE, 140730538151936, 140730538164223,
+STORE, 140475577475072, 140475577503743,
+STORE, 140475577466880, 140475577475071,
+STORE, 140475573047296, 140475575263231,
+SNULL, 140475573047296, 140475573145599,
+STORE, 140475573145600, 140475575263231,
+STORE, 140475573047296, 140475573145599,
+SNULL, 140475575238655, 140475575263231,
+STORE, 140475573145600, 140475575238655,
+STORE, 140475575238656, 140475575263231,
+SNULL, 140475575238656, 140475575246847,
+STORE, 140475575246848, 140475575263231,
+STORE, 140475575238656, 140475575246847,
+ERASE, 140475575238656, 140475575246847,
+STORE, 140475575238656, 140475575246847,
+ERASE, 140475575246848, 140475575263231,
+STORE, 140475575246848, 140475575263231,
+STORE, 140475569250304, 140475573047295,
+SNULL, 140475569250304, 140475570909183,
+STORE, 140475570909184, 140475573047295,
+STORE, 140475569250304, 140475570909183,
+SNULL, 140475573006335, 140475573047295,
+STORE, 140475570909184, 140475573006335,
+STORE, 140475573006336, 140475573047295,
+SNULL, 140475573006336, 140475573030911,
+STORE, 140475573030912, 140475573047295,
+STORE, 140475573006336, 140475573030911,
+ERASE, 140475573006336, 140475573030911,
+STORE, 140475573006336, 140475573030911,
+ERASE, 140475573030912, 140475573047295,
+STORE, 140475573030912, 140475573047295,
+STORE, 140475577458688, 140475577475071,
+SNULL, 140475573022719, 140475573030911,
+STORE, 140475573006336, 140475573022719,
+STORE, 140475573022720, 140475573030911,
+SNULL, 140475575242751, 140475575246847,
+STORE, 140475575238656, 140475575242751,
+STORE, 140475575242752, 140475575246847,
+SNULL, 94245242105855, 94245242109951,
+STORE, 94245242101760, 94245242105855,
+STORE, 94245242105856, 94245242109951,
+SNULL, 140475577507839, 140475577511935,
+STORE, 140475577503744, 140475577507839,
+STORE, 140475577507840, 140475577511935,
+ERASE, 140475577475072, 140475577503743,
+STORE, 94245271216128, 94245271351295,
+STORE, 140475560857600, 140475569250303,
+SNULL, 140475560861695, 140475569250303,
+STORE, 140475560857600, 140475560861695,
+STORE, 140475560861696, 140475569250303,
+STORE, 140475552464896, 140475560857599,
+STORE, 140475418247168, 140475552464895,
+SNULL, 140475418247168, 140475428241407,
+STORE, 140475428241408, 140475552464895,
+STORE, 140475418247168, 140475428241407,
+ERASE, 140475418247168, 140475428241407,
+SNULL, 140475495350271, 140475552464895,
+STORE, 140475428241408, 140475495350271,
+STORE, 140475495350272, 140475552464895,
+ERASE, 140475495350272, 140475552464895,
+SNULL, 140475428376575, 140475495350271,
+STORE, 140475428241408, 140475428376575,
+STORE, 140475428376576, 140475495350271,
+SNULL, 140475552468991, 140475560857599,
+STORE, 140475552464896, 140475552468991,
+STORE, 140475552468992, 140475560857599,
+STORE, 140475544072192, 140475552464895,
+SNULL, 140475544076287, 140475552464895,
+STORE, 140475544072192, 140475544076287,
+STORE, 140475544076288, 140475552464895,
+STORE, 140475535679488, 140475544072191,
+SNULL, 140475535683583, 140475544072191,
+STORE, 140475535679488, 140475535683583,
+STORE, 140475535683584, 140475544072191,
+STORE, 140475527286784, 140475535679487,
+SNULL, 140475527290879, 140475535679487,
+STORE, 140475527286784, 140475527290879,
+STORE, 140475527290880, 140475535679487,
+STORE, 140475518894080, 140475527286783,
+STORE, 140475510501376, 140475527286783,
+STORE, 140475502108672, 140475527286783,
+STORE, 140475419848704, 140475428241407,
+STORE, 140475285630976, 140475419848703,
+SNULL, 140475285630976, 140475294023679,
+STORE, 140475294023680, 140475419848703,
+STORE, 140475285630976, 140475294023679,
+ERASE, 140475285630976, 140475294023679,
+STORE, 140475159805952, 140475419848703,
+STORE, 140475025588224, 140475419848703,
+SNULL, 140475092697087, 140475419848703,
+STORE, 140475025588224, 140475092697087,
+STORE, 140475092697088, 140475419848703,
+SNULL, 140475092697088, 140475159805951,
+STORE, 140475159805952, 140475419848703,
+STORE, 140475092697088, 140475159805951,
+ERASE, 140475092697088, 140475159805951,
+STORE, 140474891370496, 140475092697087,
+SNULL, 140474958479359, 140475092697087,
+STORE, 140474891370496, 140474958479359,
+STORE, 140474958479360, 140475092697087,
+SNULL, 140474958479360, 140475025588223,
+STORE, 140475025588224, 140475092697087,
+STORE, 140474958479360, 140475025588223,
+ERASE, 140474958479360, 140475025588223,
+SNULL, 140475361132543, 140475419848703,
+STORE, 140475159805952, 140475361132543,
+STORE, 140475361132544, 140475419848703,
+ERASE, 140475361132544, 140475419848703,
+SNULL, 140475159805952, 140475294023679,
+STORE, 140475294023680, 140475361132543,
+STORE, 140475159805952, 140475294023679,
+SNULL, 140475294158847, 140475361132543,
+STORE, 140475294023680, 140475294158847,
+STORE, 140475294158848, 140475361132543,
+SNULL, 140475226914815, 140475294023679,
+STORE, 140475159805952, 140475226914815,
+STORE, 140475226914816, 140475294023679,
+ERASE, 140475226914816, 140475294023679,
+SNULL, 140475025723391, 140475092697087,
+STORE, 140475025588224, 140475025723391,
+STORE, 140475025723392, 140475092697087,
+SNULL, 140475159941119, 140475226914815,
+STORE, 140475159805952, 140475159941119,
+STORE, 140475159941120, 140475226914815,
+SNULL, 140474891505663, 140474958479359,
+STORE, 140474891370496, 140474891505663,
+STORE, 140474891505664, 140474958479359,
+SNULL, 140475502108672, 140475518894079,
+STORE, 140475518894080, 140475527286783,
+STORE, 140475502108672, 140475518894079,
+SNULL, 140475518898175, 140475527286783,
+STORE, 140475518894080, 140475518898175,
+STORE, 140475518898176, 140475527286783,
+STORE, 140475411456000, 140475428241407,
+SNULL, 140475502112767, 140475518894079,
+STORE, 140475502108672, 140475502112767,
+STORE, 140475502112768, 140475518894079,
+SNULL, 140475411460095, 140475428241407,
+STORE, 140475411456000, 140475411460095,
+STORE, 140475411460096, 140475428241407,
+SNULL, 140475411460096, 140475419848703,
+STORE, 140475419848704, 140475428241407,
+STORE, 140475411460096, 140475419848703,
+SNULL, 140475419852799, 140475428241407,
+STORE, 140475419848704, 140475419852799,
+STORE, 140475419852800, 140475428241407,
+STORE, 140475403063296, 140475411455999,
+SNULL, 140475502112768, 140475510501375,
+STORE, 140475510501376, 140475518894079,
+STORE, 140475502112768, 140475510501375,
+SNULL, 140475510505471, 140475518894079,
+STORE, 140475510501376, 140475510505471,
+STORE, 140475510505472, 140475518894079,
+SNULL, 140475403067391, 140475411455999,
+STORE, 140475403063296, 140475403067391,
+STORE, 140475403067392, 140475411455999,
+STORE, 140475394670592, 140475403063295,
+SNULL, 140475394674687, 140475403063295,
+STORE, 140475394670592, 140475394674687,
+STORE, 140475394674688, 140475403063295,
+STORE, 140475386277888, 140475394670591,
+STORE, 140475377885184, 140475394670591,
+STORE, 140475369492480, 140475394670591,
+SNULL, 140475369496575, 140475394670591,
+STORE, 140475369492480, 140475369496575,
+STORE, 140475369496576, 140475394670591,
+SNULL, 140475369496576, 140475377885183,
+STORE, 140475377885184, 140475394670591,
+STORE, 140475369496576, 140475377885183,
+SNULL, 140475377889279, 140475394670591,
+STORE, 140475377885184, 140475377889279,
+STORE, 140475377889280, 140475394670591,
+STORE, 140475285630976, 140475294023679,
+SNULL, 140475377889280, 140475386277887,
+STORE, 140475386277888, 140475394670591,
+STORE, 140475377889280, 140475386277887,
+SNULL, 140475386281983, 140475394670591,
+STORE, 140475386277888, 140475386281983,
+STORE, 140475386281984, 140475394670591,
+SNULL, 140475285635071, 140475294023679,
+STORE, 140475285630976, 140475285635071,
+STORE, 140475285635072, 140475294023679,
+STORE, 140475277238272, 140475285630975,
+STORE, 140475268845568, 140475285630975,
+SNULL, 140475268845568, 140475277238271,
+STORE, 140475277238272, 140475285630975,
+STORE, 140475268845568, 140475277238271,
+SNULL, 140475277242367, 140475285630975,
+STORE, 140475277238272, 140475277242367,
+STORE, 140475277242368, 140475285630975,
+STORE, 140475260452864, 140475277238271,
+SNULL, 140475260452864, 140475268845567,
+STORE, 140475268845568, 140475277238271,
+STORE, 140475260452864, 140475268845567,
+SNULL, 140475268849663, 140475277238271,
+STORE, 140475268845568, 140475268849663,
+STORE, 140475268849664, 140475277238271,
+SNULL, 140475260456959, 140475268845567,
+STORE, 140475260452864, 140475260456959,
+STORE, 140475260456960, 140475268845567,
+STORE, 140475252060160, 140475260452863,
+SNULL, 140475252064255, 140475260452863,
+STORE, 140475252060160, 140475252064255,
+STORE, 140475252064256, 140475260452863,
+STORE, 140475243667456, 140475252060159,
+SNULL, 140475243671551, 140475252060159,
+STORE, 140475243667456, 140475243671551,
+STORE, 140475243671552, 140475252060159,
+STORE, 140475235274752, 140475243667455,
+STORE, 140475151413248, 140475159805951,
+STORE, 140474891505664, 140475025588223,
+STORE, 140475143020544, 140475159805951,
+SNULL, 140474891505664, 140474958479359,
+STORE, 140474958479360, 140475025588223,
+STORE, 140474891505664, 140474958479359,
+SNULL, 140474958614527, 140475025588223,
+STORE, 140474958479360, 140474958614527,
+STORE, 140474958614528, 140475025588223,
+STORE, 140474824261632, 140474891370495,
+SNULL, 140474824396799, 140474891370495,
+STORE, 140474824261632, 140474824396799,
+STORE, 140474824396800, 140474891370495,
+STORE, 140475134627840, 140475159805951,
+STORE, 140474690043904, 140474824261631,
+STORE, 140475126235136, 140475159805951,
+STORE, 140475117842432, 140475159805951,
+STORE, 140474622935040, 140474824261631,
+STORE, 140475109449728, 140475159805951,
+STORE, 140474488717312, 140474824261631,
+STORE, 140475101057024, 140475159805951,
+STORE, 140474480324608, 140474488717311,
+STORE, 140474413215744, 140474480324607,
+STORE, 140474404823040, 140474413215743,
+ERASE, 140474413215744, 140474480324607,
+STORE, 140474471931904, 140474488717311,
+STORE, 140474270605312, 140474404823039,
+SNULL, 140475101057024, 140475126235135,
+STORE, 140475126235136, 140475159805951,
+STORE, 140475101057024, 140475126235135,
+SNULL, 140475126239231, 140475159805951,
+STORE, 140475126235136, 140475126239231,
+STORE, 140475126239232, 140475159805951,
+STORE, 140474463539200, 140474488717311,
+STORE, 140474455146496, 140474488717311,
+SNULL, 140474455150591, 140474488717311,
+STORE, 140474455146496, 140474455150591,
+STORE, 140474455150592, 140474488717311,
+STORE, 140474446753792, 140474455146495,
+SNULL, 140474446757887, 140474455146495,
+STORE, 140474446753792, 140474446757887,
+STORE, 140474446757888, 140474455146495,
+STORE, 140474438361088, 140474446753791,
+STORE, 140474429968384, 140474446753791,
+SNULL, 140474429972479, 140474446753791,
+STORE, 140474429968384, 140474429972479,
+STORE, 140474429972480, 140474446753791,
+SNULL, 140475235278847, 140475243667455,
+STORE, 140475235274752, 140475235278847,
+STORE, 140475235278848, 140475243667455,
+SNULL, 140474757152767, 140474824261631,
+STORE, 140474488717312, 140474757152767,
+STORE, 140474757152768, 140474824261631,
+ERASE, 140474757152768, 140474824261631,
+SNULL, 140474488717312, 140474690043903,
+STORE, 140474690043904, 140474757152767,
+STORE, 140474488717312, 140474690043903,
+SNULL, 140474690179071, 140474757152767,
+STORE, 140474690043904, 140474690179071,
+STORE, 140474690179072, 140474757152767,
+SNULL, 140474488717312, 140474622935039,
+STORE, 140474622935040, 140474690043903,
+STORE, 140474488717312, 140474622935039,
+SNULL, 140474623070207, 140474690043903,
+STORE, 140474622935040, 140474623070207,
+STORE, 140474623070208, 140474690043903,
+SNULL, 140475101057024, 140475117842431,
+STORE, 140475117842432, 140475126235135,
+STORE, 140475101057024, 140475117842431,
+SNULL, 140475117846527, 140475126235135,
+STORE, 140475117842432, 140475117846527,
+STORE, 140475117846528, 140475126235135,
+SNULL, 140474555826175, 140474622935039,
+STORE, 140474488717312, 140474555826175,
+STORE, 140474555826176, 140474622935039,
+ERASE, 140474555826176, 140474622935039,
+STORE, 140474136387584, 140474404823039,
+SNULL, 140474136387584, 140474153172991,
+STORE, 140474153172992, 140474404823039,
+STORE, 140474136387584, 140474153172991,
+ERASE, 140474136387584, 140474153172991,
+STORE, 140474018955264, 140474404823039,
+STORE, 140473884737536, 140474404823039,
+SNULL, 140474086064127, 140474404823039,
+STORE, 140473884737536, 140474086064127,
+STORE, 140474086064128, 140474404823039,
+SNULL, 140474086064128, 140474153172991,
+STORE, 140474153172992, 140474404823039,
+STORE, 140474086064128, 140474153172991,
+ERASE, 140474086064128, 140474153172991,
+STORE, 140473750519808, 140474086064127,
+SNULL, 140473817628671, 140474086064127,
+STORE, 140473750519808, 140473817628671,
+STORE, 140473817628672, 140474086064127,
+SNULL, 140473817628672, 140473884737535,
+STORE, 140473884737536, 140474086064127,
+STORE, 140473817628672, 140473884737535,
+ERASE, 140473817628672, 140473884737535,
+SNULL, 140475126239232, 140475151413247,
+STORE, 140475151413248, 140475159805951,
+STORE, 140475126239232, 140475151413247,
+SNULL, 140475151417343, 140475159805951,
+STORE, 140475151413248, 140475151417343,
+STORE, 140475151417344, 140475159805951,
+SNULL, 140474270605311, 140474404823039,
+STORE, 140474153172992, 140474270605311,
+STORE, 140474270605312, 140474404823039,
+SNULL, 140474270605312, 140474287390719,
+STORE, 140474287390720, 140474404823039,
+STORE, 140474270605312, 140474287390719,
+ERASE, 140474270605312, 140474287390719,
+SNULL, 140474429972480, 140474438361087,
+STORE, 140474438361088, 140474446753791,
+STORE, 140474429972480, 140474438361087,
+SNULL, 140474438365183, 140474446753791,
+STORE, 140474438361088, 140474438365183,
+STORE, 140474438365184, 140474446753791,
+STORE, 140474815868928, 140474824261631,
+SNULL, 140474815873023, 140474824261631,
+STORE, 140474815868928, 140474815873023,
+STORE, 140474815873024, 140474824261631,
+SNULL, 140474220281855, 140474270605311,
+STORE, 140474153172992, 140474220281855,
+STORE, 140474220281856, 140474270605311,
+ERASE, 140474220281856, 140474270605311,
+SNULL, 140474488852479, 140474555826175,
+STORE, 140474488717312, 140474488852479,
+STORE, 140474488852480, 140474555826175,
+SNULL, 140475101057024, 140475109449727,
+STORE, 140475109449728, 140475117842431,
+STORE, 140475101057024, 140475109449727,
+SNULL, 140475109453823, 140475117842431,
+STORE, 140475109449728, 140475109453823,
+STORE, 140475109453824, 140475117842431,
+SNULL, 140473951846399, 140474086064127,
+STORE, 140473884737536, 140473951846399,
+STORE, 140473951846400, 140474086064127,
+SNULL, 140473951846400, 140474018955263,
+STORE, 140474018955264, 140474086064127,
+STORE, 140473951846400, 140474018955263,
+ERASE, 140473951846400, 140474018955263,
+SNULL, 140473884872703, 140473951846399,
+STORE, 140473884737536, 140473884872703,
+STORE, 140473884872704, 140473951846399,
+SNULL, 140474019090431, 140474086064127,
+STORE, 140474018955264, 140474019090431,
+STORE, 140474019090432, 140474086064127,
+SNULL, 140473750654975, 140473817628671,
+STORE, 140473750519808, 140473750654975,
+STORE, 140473750654976, 140473817628671,
+SNULL, 140474455150592, 140474463539199,
+STORE, 140474463539200, 140474488717311,
+STORE, 140474455150592, 140474463539199,
+SNULL, 140474463543295, 140474488717311,
+STORE, 140474463539200, 140474463543295,
+STORE, 140474463543296, 140474488717311,
+STORE, 140474807476224, 140474815868927,
+SNULL, 140474463543296, 140474471931903,
+STORE, 140474471931904, 140474488717311,
+STORE, 140474463543296, 140474471931903,
+SNULL, 140474471935999, 140474488717311,
+STORE, 140474471931904, 140474471935999,
+STORE, 140474471936000, 140474488717311,
+STORE, 140474799083520, 140474815868927,
+STORE, 140474790690816, 140474815868927,
+SNULL, 140474790690816, 140474799083519,
+STORE, 140474799083520, 140474815868927,
+STORE, 140474790690816, 140474799083519,
+SNULL, 140474799087615, 140474815868927,
+STORE, 140474799083520, 140474799087615,
+STORE, 140474799087616, 140474815868927,
+SNULL, 140474354499583, 140474404823039,
+STORE, 140474287390720, 140474354499583,
+STORE, 140474354499584, 140474404823039,
+ERASE, 140474354499584, 140474404823039,
+SNULL, 140474287525887, 140474354499583,
+STORE, 140474287390720, 140474287525887,
+STORE, 140474287525888, 140474354499583,
+STORE, 140474782298112, 140474799083519,
+STORE, 140474773905408, 140474799083519,
+SNULL, 140474773909503, 140474799083519,
+STORE, 140474773905408, 140474773909503,
+STORE, 140474773909504, 140474799083519,
+SNULL, 140475126239232, 140475134627839,
+STORE, 140475134627840, 140475151413247,
+STORE, 140475126239232, 140475134627839,
+SNULL, 140475134631935, 140475151413247,
+STORE, 140475134627840, 140475134631935,
+STORE, 140475134631936, 140475151413247,
+STORE, 140474765512704, 140474773905407,
+STORE, 140474614542336, 140474622935039,
+SNULL, 140474153308159, 140474220281855,
+STORE, 140474153172992, 140474153308159,
+STORE, 140474153308160, 140474220281855,
+SNULL, 140474404827135, 140474413215743,
+STORE, 140474404823040, 140474404827135,
+STORE, 140474404827136, 140474413215743,
+STORE, 140474606149632, 140474622935039,
+SNULL, 140474606153727, 140474622935039,
+STORE, 140474606149632, 140474606153727,
+STORE, 140474606153728, 140474622935039,
+STORE, 140474597756928, 140474606149631,
+SNULL, 140474597761023, 140474606149631,
+STORE, 140474597756928, 140474597761023,
+STORE, 140474597761024, 140474606149631,
+SNULL, 140475134631936, 140475143020543,
+STORE, 140475143020544, 140475151413247,
+STORE, 140475134631936, 140475143020543,
+SNULL, 140475143024639, 140475151413247,
+STORE, 140475143020544, 140475143024639,
+STORE, 140475143024640, 140475151413247,
+STORE, 140474589364224, 140474597756927,
+SNULL, 140474606153728, 140474614542335,
+STORE, 140474614542336, 140474622935039,
+STORE, 140474606153728, 140474614542335,
+SNULL, 140474614546431, 140474622935039,
+STORE, 140474614542336, 140474614546431,
+STORE, 140474614546432, 140474622935039,
+SNULL, 140474765516799, 140474773905407,
+STORE, 140474765512704, 140474765516799,
+STORE, 140474765516800, 140474773905407,
+STORE, 140474580971520, 140474597756927,
+SNULL, 140474773909504, 140474782298111,
+STORE, 140474782298112, 140474799083519,
+STORE, 140474773909504, 140474782298111,
+SNULL, 140474782302207, 140474799083519,
+STORE, 140474782298112, 140474782302207,
+STORE, 140474782302208, 140474799083519,
+SNULL, 140474471936000, 140474480324607,
+STORE, 140474480324608, 140474488717311,
+STORE, 140474471936000, 140474480324607,
+SNULL, 140474480328703, 140474488717311,
+STORE, 140474480324608, 140474480328703,
+STORE, 140474480328704, 140474488717311,
+STORE, 140474572578816, 140474597756927,
+SNULL, 140474572582911, 140474597756927,
+STORE, 140474572578816, 140474572582911,
+STORE, 140474572582912, 140474597756927,
+SNULL, 140474782302208, 140474790690815,
+STORE, 140474790690816, 140474799083519,
+STORE, 140474782302208, 140474790690815,
+SNULL, 140474790694911, 140474799083519,
+STORE, 140474790690816, 140474790694911,
+STORE, 140474790694912, 140474799083519,
+STORE, 140474564186112, 140474572578815,
+STORE, 140474421575680, 140474429968383,
+STORE, 140474396430336, 140474404823039,
+SNULL, 140474396434431, 140474404823039,
+STORE, 140474396430336, 140474396434431,
+STORE, 140474396434432, 140474404823039,
+STORE, 140474388037632, 140474396430335,
+SNULL, 140474799087616, 140474807476223,
+STORE, 140474807476224, 140474815868927,
+STORE, 140474799087616, 140474807476223,
+SNULL, 140474807480319, 140474815868927,
+STORE, 140474807476224, 140474807480319,
+STORE, 140474807480320, 140474815868927,
+SNULL, 140475101061119, 140475109449727,
+STORE, 140475101057024, 140475101061119,
+STORE, 140475101061120, 140475109449727,
+STORE, 140474379644928, 140474396430335,
+SNULL, 140474572582912, 140474589364223,
+STORE, 140474589364224, 140474597756927,
+STORE, 140474572582912, 140474589364223,
+SNULL, 140474589368319, 140474597756927,
+STORE, 140474589364224, 140474589368319,
+STORE, 140474589368320, 140474597756927,
+STORE, 140474371252224, 140474396430335,
+STORE, 140474362859520, 140474396430335,
+STORE, 140474278998016, 140474287390719,
+STORE, 140474270605312, 140474287390719,
+STORE, 140474262212608, 140474287390719,
+SNULL, 140474262216703, 140474287390719,
+STORE, 140474262212608, 140474262216703,
+STORE, 140474262216704, 140474287390719,
+STORE, 140474253819904, 140474262212607,
+SNULL, 140474253823999, 140474262212607,
+STORE, 140474253819904, 140474253823999,
+STORE, 140474253824000, 140474262212607,
+SNULL, 140474362859520, 140474388037631,
+STORE, 140474388037632, 140474396430335,
+STORE, 140474362859520, 140474388037631,
+SNULL, 140474388041727, 140474396430335,
+STORE, 140474388037632, 140474388041727,
+STORE, 140474388041728, 140474396430335,
+SNULL, 140474362859520, 140474379644927,
+STORE, 140474379644928, 140474388037631,
+STORE, 140474362859520, 140474379644927,
+SNULL, 140474379649023, 140474388037631,
+STORE, 140474379644928, 140474379649023,
+STORE, 140474379649024, 140474388037631,
+STORE, 140474245427200, 140474253819903,
+STORE, 140474237034496, 140474253819903,
+STORE, 140474228641792, 140474253819903,
+STORE, 140474144780288, 140474153172991,
+SNULL, 140474228645887, 140474253819903,
+STORE, 140474228641792, 140474228645887,
+STORE, 140474228645888, 140474253819903,
+SNULL, 140474564190207, 140474572578815,
+STORE, 140474564186112, 140474564190207,
+STORE, 140474564190208, 140474572578815,
+STORE, 140474136387584, 140474153172991,
+SNULL, 140474362859520, 140474371252223,
+STORE, 140474371252224, 140474379644927,
+STORE, 140474362859520, 140474371252223,
+SNULL, 140474371256319, 140474379644927,
+STORE, 140474371252224, 140474371256319,
+STORE, 140474371256320, 140474379644927,
+STORE, 140474127994880, 140474153172991,
+STORE, 140474119602176, 140474153172991,
+SNULL, 140474421579775, 140474429968383,
+STORE, 140474421575680, 140474421579775,
+STORE, 140474421579776, 140474429968383,
+STORE, 140474111209472, 140474153172991,
+SNULL, 140474111213567, 140474153172991,
+STORE, 140474111209472, 140474111213567,
+STORE, 140474111213568, 140474153172991,
+SNULL, 140474262216704, 140474270605311,
+STORE, 140474270605312, 140474287390719,
+STORE, 140474262216704, 140474270605311,
+SNULL, 140474270609407, 140474287390719,
+STORE, 140474270605312, 140474270609407,
+STORE, 140474270609408, 140474287390719,
+STORE, 140474102816768, 140474111209471,
+SNULL, 140474102820863, 140474111209471,
+STORE, 140474102816768, 140474102820863,
+STORE, 140474102820864, 140474111209471,
+SNULL, 140474270609408, 140474278998015,
+STORE, 140474278998016, 140474287390719,
+STORE, 140474270609408, 140474278998015,
+SNULL, 140474279002111, 140474287390719,
+STORE, 140474278998016, 140474279002111,
+STORE, 140474279002112, 140474287390719,
+STORE, 140474094424064, 140474102816767,
+SNULL, 140474572582912, 140474580971519,
+STORE, 140474580971520, 140474589364223,
+STORE, 140474572582912, 140474580971519,
+SNULL, 140474580975615, 140474589364223,
+STORE, 140474580971520, 140474580975615,
+STORE, 140474580975616, 140474589364223,
+SNULL, 140474362863615, 140474371252223,
+STORE, 140474362859520, 140474362863615,
+STORE, 140474362863616, 140474371252223,
+STORE, 140474010562560, 140474018955263,
+SNULL, 140474228645888, 140474245427199,
+STORE, 140474245427200, 140474253819903,
+STORE, 140474228645888, 140474245427199,
+SNULL, 140474245431295, 140474253819903,
+STORE, 140474245427200, 140474245431295,
+STORE, 140474245431296, 140474253819903,
+SNULL, 140474111213568, 140474136387583,
+STORE, 140474136387584, 140474153172991,
+STORE, 140474111213568, 140474136387583,
+SNULL, 140474136391679, 140474153172991,
+STORE, 140474136387584, 140474136391679,
+STORE, 140474136391680, 140474153172991,
+STORE, 140474002169856, 140474018955263,
+STORE, 140473993777152, 140474018955263,
+SNULL, 140474111213568, 140474127994879,
+STORE, 140474127994880, 140474136387583,
+STORE, 140474111213568, 140474127994879,
+SNULL, 140474127998975, 140474136387583,
+STORE, 140474127994880, 140474127998975,
+STORE, 140474127998976, 140474136387583,
+SNULL, 140474228645888, 140474237034495,
+STORE, 140474237034496, 140474245427199,
+STORE, 140474228645888, 140474237034495,
+SNULL, 140474237038591, 140474245427199,
+STORE, 140474237034496, 140474237038591,
+STORE, 140474237038592, 140474245427199,
+SNULL, 140474136391680, 140474144780287,
+STORE, 140474144780288, 140474153172991,
+STORE, 140474136391680, 140474144780287,
+SNULL, 140474144784383, 140474153172991,
+STORE, 140474144780288, 140474144784383,
+STORE, 140474144784384, 140474153172991,
+STORE, 140473985384448, 140474018955263,
+STORE, 140473976991744, 140474018955263,
+STORE, 140473968599040, 140474018955263,
+SNULL, 140473968603135, 140474018955263,
+STORE, 140473968599040, 140473968603135,
+STORE, 140473968603136, 140474018955263,
+SNULL, 140474111213568, 140474119602175,
+STORE, 140474119602176, 140474127994879,
+STORE, 140474111213568, 140474119602175,
+SNULL, 140474119606271, 140474127994879,
+STORE, 140474119602176, 140474119606271,
+STORE, 140474119606272, 140474127994879,
+STORE, 140473960206336, 140473968599039,
+SNULL, 140474094428159, 140474102816767,
+STORE, 140474094424064, 140474094428159,
+STORE, 140474094428160, 140474102816767,
+STORE, 140473876344832, 140473884737535,
+STORE, 140473867952128, 140473884737535,
+STORE, 140473859559424, 140473884737535,
+SNULL, 140473859563519, 140473884737535,
+STORE, 140473859559424, 140473859563519,
+STORE, 140473859563520, 140473884737535,
+SNULL, 140473968603136, 140473993777151,
+STORE, 140473993777152, 140474018955263,
+STORE, 140473968603136, 140473993777151,
+SNULL, 140473993781247, 140474018955263,
+STORE, 140473993777152, 140473993781247,
+STORE, 140473993781248, 140474018955263,
+SNULL, 140473960210431, 140473968599039,
+STORE, 140473960206336, 140473960210431,
+STORE, 140473960210432, 140473968599039,
+SNULL, 140473993781248, 140474010562559,
+STORE, 140474010562560, 140474018955263,
+STORE, 140473993781248, 140474010562559,
+SNULL, 140474010566655, 140474018955263,
+STORE, 140474010562560, 140474010566655,
+STORE, 140474010566656, 140474018955263,
+SNULL, 140473968603136, 140473985384447,
+STORE, 140473985384448, 140473993777151,
+STORE, 140473968603136, 140473985384447,
+SNULL, 140473985388543, 140473993777151,
+STORE, 140473985384448, 140473985388543,
+STORE, 140473985388544, 140473993777151,
+SNULL, 140473993781248, 140474002169855,
+STORE, 140474002169856, 140474010562559,
+STORE, 140473993781248, 140474002169855,
+SNULL, 140474002173951, 140474010562559,
+STORE, 140474002169856, 140474002173951,
+STORE, 140474002173952, 140474010562559,
+STORE, 140473851166720, 140473859559423,
+SNULL, 140473851170815, 140473859559423,
+STORE, 140473851166720, 140473851170815,
+STORE, 140473851170816, 140473859559423,
+SNULL, 140473968603136, 140473976991743,
+STORE, 140473976991744, 140473985384447,
+STORE, 140473968603136, 140473976991743,
+SNULL, 140473976995839, 140473985384447,
+STORE, 140473976991744, 140473976995839,
+STORE, 140473976995840, 140473985384447,
+STORE, 140473842774016, 140473851166719,
+SNULL, 140473859563520, 140473867952127,
+STORE, 140473867952128, 140473884737535,
+STORE, 140473859563520, 140473867952127,
+SNULL, 140473867956223, 140473884737535,
+STORE, 140473867952128, 140473867956223,
+STORE, 140473867956224, 140473884737535,
+SNULL, 140473867956224, 140473876344831,
+STORE, 140473876344832, 140473884737535,
+STORE, 140473867956224, 140473876344831,
+SNULL, 140473876348927, 140473884737535,
+STORE, 140473876344832, 140473876348927,
+STORE, 140473876348928, 140473884737535,
+STORE, 140473834381312, 140473851166719,
+SNULL, 140473834385407, 140473851166719,
+STORE, 140473834381312, 140473834385407,
+STORE, 140473834385408, 140473851166719,
+SNULL, 140473834385408, 140473842774015,
+STORE, 140473842774016, 140473851166719,
+STORE, 140473834385408, 140473842774015,
+SNULL, 140473842778111, 140473851166719,
+STORE, 140473842774016, 140473842778111,
+STORE, 140473842778112, 140473851166719,
+STORE, 140473825988608, 140473834381311,
+SNULL, 140473825992703, 140473834381311,
+STORE, 140473825988608, 140473825992703,
+STORE, 140473825992704, 140473834381311,
+STORE, 140475577475072, 140475577503743,
+STORE, 140475499917312, 140475502108671,
+SNULL, 140475499917312, 140475500007423,
+STORE, 140475500007424, 140475502108671,
+STORE, 140475499917312, 140475500007423,
+SNULL, 140475502100479, 140475502108671,
+STORE, 140475500007424, 140475502100479,
+STORE, 140475502100480, 140475502108671,
+ERASE, 140475502100480, 140475502108671,
+STORE, 140475502100480, 140475502108671,
+SNULL, 140475502104575, 140475502108671,
+STORE, 140475502100480, 140475502104575,
+STORE, 140475502104576, 140475502108671,
+ERASE, 140475577475072, 140475577503743,
+ERASE, 140475235274752, 140475235278847,
+ERASE, 140475235278848, 140475243667455,
+ERASE, 140474815868928, 140474815873023,
+ERASE, 140474815873024, 140474824261631,
+ERASE, 140474606149632, 140474606153727,
+ERASE, 140474606153728, 140474614542335,
+ERASE, 140474270605312, 140474270609407,
+ERASE, 140474270609408, 140474278998015,
+ERASE, 140474438361088, 140474438365183,
+ERASE, 140474438365184, 140474446753791,
+ERASE, 140474597756928, 140474597761023,
+ERASE, 140474597761024, 140474606149631,
+ERASE, 140475126235136, 140475126239231,
+ERASE, 140475126239232, 140475134627839,
+ERASE, 140474463539200, 140474463543295,
+ERASE, 140474463543296, 140474471931903,
+ERASE, 140474388037632, 140474388041727,
+ERASE, 140474388041728, 140474396430335,
+ERASE, 140474404823040, 140474404827135,
+ERASE, 140474404827136, 140474413215743,
+ERASE, 140474278998016, 140474279002111,
+ERASE, 140474279002112, 140474287390719,
+ERASE, 140474094424064, 140474094428159,
+ERASE, 140474094428160, 140474102816767,
+ERASE, 140473867952128, 140473867956223,
+ERASE, 140473867956224, 140473876344831,
+ERASE, 140475151413248, 140475151417343,
+ERASE, 140475151417344, 140475159805951,
+ERASE, 140474455146496, 140474455150591,
+ERASE, 140474455150592, 140474463539199,
+ERASE, 140474807476224, 140474807480319,
+ERASE, 140474807480320, 140474815868927,
+ERASE, 140475117842432, 140475117846527,
+ERASE, 140475117846528, 140475126235135,
+ERASE, 140474446753792, 140474446757887,
+ERASE, 140474446757888, 140474455146495,
+ERASE, 140474429968384, 140474429972479,
+ERASE, 140474429972480, 140474438361087,
+ERASE, 140474782298112, 140474782302207,
+ERASE, 140474782302208, 140474790690815,
+ERASE, 140474136387584, 140474136391679,
+ERASE, 140474136391680, 140474144780287,
+ERASE, 140474002169856, 140474002173951,
+ERASE, 140474002173952, 140474010562559,
+ERASE, 140475134627840, 140475134631935,
+ERASE, 140475134631936, 140475143020543,
+ERASE, 140474471931904, 140474471935999,
+ERASE, 140474471936000, 140474480324607,
+ERASE, 140474396430336, 140474396434431,
+ERASE, 140474396434432, 140474404823039,
+ };
+ unsigned long set36[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140723893125120, 140737488351231,
+SNULL, 140723893129215, 140737488351231,
+STORE, 140723893125120, 140723893129215,
+STORE, 140723892994048, 140723893129215,
+STORE, 94076829786112, 94076832038911,
+SNULL, 94076829917183, 94076832038911,
+STORE, 94076829786112, 94076829917183,
+STORE, 94076829917184, 94076832038911,
+ERASE, 94076829917184, 94076832038911,
+STORE, 94076832010240, 94076832018431,
+STORE, 94076832018432, 94076832038911,
+STORE, 140122444345344, 140122446598143,
+SNULL, 140122444488703, 140122446598143,
+STORE, 140122444345344, 140122444488703,
+STORE, 140122444488704, 140122446598143,
+ERASE, 140122444488704, 140122446598143,
+STORE, 140122446585856, 140122446594047,
+STORE, 140122446594048, 140122446598143,
+STORE, 140723893538816, 140723893542911,
+STORE, 140723893526528, 140723893538815,
+STORE, 140122446557184, 140122446585855,
+STORE, 140122446548992, 140122446557183,
+STORE, 140122442129408, 140122444345343,
+SNULL, 140122442129408, 140122442227711,
+STORE, 140122442227712, 140122444345343,
+STORE, 140122442129408, 140122442227711,
+SNULL, 140122444320767, 140122444345343,
+STORE, 140122442227712, 140122444320767,
+STORE, 140122444320768, 140122444345343,
+SNULL, 140122444320768, 140122444328959,
+STORE, 140122444328960, 140122444345343,
+STORE, 140122444320768, 140122444328959,
+ERASE, 140122444320768, 140122444328959,
+STORE, 140122444320768, 140122444328959,
+ERASE, 140122444328960, 140122444345343,
+STORE, 140122444328960, 140122444345343,
+STORE, 140122438332416, 140122442129407,
+SNULL, 140122438332416, 140122439991295,
+STORE, 140122439991296, 140122442129407,
+STORE, 140122438332416, 140122439991295,
+SNULL, 140122442088447, 140122442129407,
+STORE, 140122439991296, 140122442088447,
+STORE, 140122442088448, 140122442129407,
+SNULL, 140122442088448, 140122442113023,
+STORE, 140122442113024, 140122442129407,
+STORE, 140122442088448, 140122442113023,
+ERASE, 140122442088448, 140122442113023,
+STORE, 140122442088448, 140122442113023,
+ERASE, 140122442113024, 140122442129407,
+STORE, 140122442113024, 140122442129407,
+STORE, 140122446540800, 140122446557183,
+SNULL, 140122442104831, 140122442113023,
+STORE, 140122442088448, 140122442104831,
+STORE, 140122442104832, 140122442113023,
+SNULL, 140122444324863, 140122444328959,
+STORE, 140122444320768, 140122444324863,
+STORE, 140122444324864, 140122444328959,
+SNULL, 94076832014335, 94076832018431,
+STORE, 94076832010240, 94076832014335,
+STORE, 94076832014336, 94076832018431,
+SNULL, 140122446589951, 140122446594047,
+STORE, 140122446585856, 140122446589951,
+STORE, 140122446589952, 140122446594047,
+ERASE, 140122446557184, 140122446585855,
+STORE, 94076845723648, 94076845858815,
+STORE, 140122429939712, 140122438332415,
+SNULL, 140122429943807, 140122438332415,
+STORE, 140122429939712, 140122429943807,
+STORE, 140122429943808, 140122438332415,
+STORE, 140122421547008, 140122429939711,
+STORE, 140122287329280, 140122421547007,
+SNULL, 140122287329280, 140122301399039,
+STORE, 140122301399040, 140122421547007,
+STORE, 140122287329280, 140122301399039,
+ERASE, 140122287329280, 140122301399039,
+SNULL, 140122368507903, 140122421547007,
+STORE, 140122301399040, 140122368507903,
+STORE, 140122368507904, 140122421547007,
+ERASE, 140122368507904, 140122421547007,
+SNULL, 140122301534207, 140122368507903,
+STORE, 140122301399040, 140122301534207,
+STORE, 140122301534208, 140122368507903,
+SNULL, 140122421551103, 140122429939711,
+STORE, 140122421547008, 140122421551103,
+STORE, 140122421551104, 140122429939711,
+STORE, 140122413154304, 140122421547007,
+SNULL, 140122413158399, 140122421547007,
+STORE, 140122413154304, 140122413158399,
+STORE, 140122413158400, 140122421547007,
+STORE, 140122404761600, 140122413154303,
+SNULL, 140122404765695, 140122413154303,
+STORE, 140122404761600, 140122404765695,
+STORE, 140122404765696, 140122413154303,
+STORE, 140122396368896, 140122404761599,
+SNULL, 140122396372991, 140122404761599,
+STORE, 140122396368896, 140122396372991,
+STORE, 140122396372992, 140122404761599,
+STORE, 140122387976192, 140122396368895,
+STORE, 140122167181312, 140122301399039,
+SNULL, 140122234290175, 140122301399039,
+STORE, 140122167181312, 140122234290175,
+STORE, 140122234290176, 140122301399039,
+ERASE, 140122234290176, 140122301399039,
+SNULL, 140122167316479, 140122234290175,
+STORE, 140122167181312, 140122167316479,
+STORE, 140122167316480, 140122234290175,
+STORE, 140122379583488, 140122396368895,
+STORE, 140122371190784, 140122396368895,
+STORE, 140122167316480, 140122301399039,
+STORE, 140122158788608, 140122167181311,
+SNULL, 140122371190784, 140122387976191,
+STORE, 140122387976192, 140122396368895,
+STORE, 140122371190784, 140122387976191,
+SNULL, 140122387980287, 140122396368895,
+STORE, 140122387976192, 140122387980287,
+STORE, 140122387980288, 140122396368895,
+SNULL, 140122167316480, 140122234290175,
+STORE, 140122234290176, 140122301399039,
+STORE, 140122167316480, 140122234290175,
+SNULL, 140122234425343, 140122301399039,
+STORE, 140122234290176, 140122234425343,
+STORE, 140122234425344, 140122301399039,
+STORE, 140122024570880, 140122158788607,
+SNULL, 140122024570880, 140122032963583,
+STORE, 140122032963584, 140122158788607,
+STORE, 140122024570880, 140122032963583,
+ERASE, 140122024570880, 140122032963583,
+STORE, 140121898745856, 140122158788607,
+STORE, 140121890353152, 140121898745855,
+SNULL, 140122100072447, 140122158788607,
+STORE, 140121898745856, 140122100072447,
+STORE, 140122100072448, 140122158788607,
+ERASE, 140122100072448, 140122158788607,
+SNULL, 140121965854719, 140122100072447,
+STORE, 140121898745856, 140121965854719,
+STORE, 140121965854720, 140122100072447,
+SNULL, 140121965854720, 140122032963583,
+STORE, 140122032963584, 140122100072447,
+STORE, 140121965854720, 140122032963583,
+ERASE, 140121965854720, 140122032963583,
+SNULL, 140121898881023, 140121965854719,
+STORE, 140121898745856, 140121898881023,
+STORE, 140121898881024, 140121965854719,
+SNULL, 140121890357247, 140121898745855,
+STORE, 140121890353152, 140121890357247,
+STORE, 140121890357248, 140121898745855,
+SNULL, 140122371190784, 140122379583487,
+STORE, 140122379583488, 140122387976191,
+STORE, 140122371190784, 140122379583487,
+SNULL, 140122379587583, 140122387976191,
+STORE, 140122379583488, 140122379587583,
+STORE, 140122379587584, 140122387976191,
+SNULL, 140122033098751, 140122100072447,
+STORE, 140122032963584, 140122033098751,
+STORE, 140122033098752, 140122100072447,
+SNULL, 140122158792703, 140122167181311,
+STORE, 140122158788608, 140122158792703,
+STORE, 140122158792704, 140122167181311,
+STORE, 140122150395904, 140122158788607,
+STORE, 140122142003200, 140122158788607,
+SNULL, 140122142007295, 140122158788607,
+STORE, 140122142003200, 140122142007295,
+STORE, 140122142007296, 140122158788607,
+SNULL, 140122371194879, 140122379583487,
+STORE, 140122371190784, 140122371194879,
+STORE, 140122371194880, 140122379583487,
+SNULL, 140122142007296, 140122150395903,
+STORE, 140122150395904, 140122158788607,
+STORE, 140122142007296, 140122150395903,
+SNULL, 140122150399999, 140122158788607,
+STORE, 140122150395904, 140122150399999,
+STORE, 140122150400000, 140122158788607,
+STORE, 140122133610496, 140122142003199,
+STORE, 140122125217792, 140122142003199,
+STORE, 140122116825088, 140122142003199,
+SNULL, 140122116829183, 140122142003199,
+STORE, 140122116825088, 140122116829183,
+STORE, 140122116829184, 140122142003199,
+SNULL, 140122116829184, 140122133610495,
+STORE, 140122133610496, 140122142003199,
+STORE, 140122116829184, 140122133610495,
+SNULL, 140122133614591, 140122142003199,
+STORE, 140122133610496, 140122133614591,
+STORE, 140122133614592, 140122142003199,
+SNULL, 140122116829184, 140122125217791,
+STORE, 140122125217792, 140122133610495,
+STORE, 140122116829184, 140122125217791,
+SNULL, 140122125221887, 140122133610495,
+STORE, 140122125217792, 140122125221887,
+STORE, 140122125221888, 140122133610495,
+STORE, 140122108432384, 140122116825087,
+SNULL, 140122108436479, 140122116825087,
+STORE, 140122108432384, 140122108436479,
+STORE, 140122108436480, 140122116825087,
+STORE, 140122024570880, 140122032963583,
+STORE, 140122016178176, 140122032963583,
+SNULL, 140122016182271, 140122032963583,
+STORE, 140122016178176, 140122016182271,
+STORE, 140122016182272, 140122032963583,
+SNULL, 140122016182272, 140122024570879,
+STORE, 140122024570880, 140122032963583,
+STORE, 140122016182272, 140122024570879,
+SNULL, 140122024574975, 140122032963583,
+STORE, 140122024570880, 140122024574975,
+STORE, 140122024574976, 140122032963583,
+STORE, 140122007785472, 140122016178175,
+SNULL, 140122007789567, 140122016178175,
+STORE, 140122007785472, 140122007789567,
+STORE, 140122007789568, 140122016178175,
+STORE, 140121999392768, 140122007785471,
+STORE, 140121991000064, 140122007785471,
+SNULL, 140121991004159, 140122007785471,
+STORE, 140121991000064, 140121991004159,
+STORE, 140121991004160, 140122007785471,
+SNULL, 140121991004160, 140121999392767,
+STORE, 140121999392768, 140122007785471,
+STORE, 140121991004160, 140121999392767,
+SNULL, 140121999396863, 140122007785471,
+STORE, 140121999392768, 140121999396863,
+STORE, 140121999396864, 140122007785471,
+STORE, 140121982607360, 140121991000063,
+STORE, 140121823244288, 140121890353151,
+ERASE, 140121823244288, 140121890353151,
+STORE, 140121756135424, 140121890353151,
+SNULL, 140121756135424, 140121764528127,
+STORE, 140121764528128, 140121890353151,
+STORE, 140121756135424, 140121764528127,
+ERASE, 140121756135424, 140121764528127,
+SNULL, 140121831636991, 140121890353151,
+STORE, 140121764528128, 140121831636991,
+STORE, 140121831636992, 140121890353151,
+ERASE, 140121831636992, 140121890353151,
+STORE, 140121974214656, 140121991000063,
+STORE, 140121630310400, 140121831636991,
+SNULL, 140121697419263, 140121831636991,
+STORE, 140121630310400, 140121697419263,
+STORE, 140121697419264, 140121831636991,
+SNULL, 140121697419264, 140121764528127,
+STORE, 140121764528128, 140121831636991,
+STORE, 140121697419264, 140121764528127,
+ERASE, 140121697419264, 140121764528127,
+STORE, 140121881960448, 140121890353151,
+STORE, 140121630310400, 140121831636991,
+STORE, 140121873567744, 140121890353151,
+SNULL, 140121630310400, 140121697419263,
+STORE, 140121697419264, 140121831636991,
+STORE, 140121630310400, 140121697419263,
+SNULL, 140121697554431, 140121831636991,
+STORE, 140121697419264, 140121697554431,
+STORE, 140121697554432, 140121831636991,
+STORE, 140121865175040, 140121890353151,
+STORE, 140121856782336, 140121890353151,
+STORE, 140121848389632, 140121890353151,
+STORE, 140121839996928, 140121890353151,
+STORE, 140121496092672, 140121697419263,
+STORE, 140121487699968, 140121496092671,
+STORE, 140121420591104, 140121487699967,
+STORE, 140121412198400, 140121420591103,
+ERASE, 140121420591104, 140121487699967,
+STORE, 140121479307264, 140121496092671,
+STORE, 140121277980672, 140121412198399,
+SNULL, 140121277980672, 140121294766079,
+STORE, 140121294766080, 140121412198399,
+STORE, 140121277980672, 140121294766079,
+ERASE, 140121277980672, 140121294766079,
+STORE, 140121470914560, 140121496092671,
+STORE, 140121462521856, 140121496092671,
+STORE, 140121160548352, 140121412198399,
+STORE, 140121454129152, 140121496092671,
+SNULL, 140121227657215, 140121412198399,
+STORE, 140121160548352, 140121227657215,
+STORE, 140121227657216, 140121412198399,
+SNULL, 140121227657216, 140121294766079,
+STORE, 140121294766080, 140121412198399,
+STORE, 140121227657216, 140121294766079,
+ERASE, 140121227657216, 140121294766079,
+STORE, 140121445736448, 140121496092671,
+STORE, 140121437343744, 140121496092671,
+SNULL, 140121437343744, 140121445736447,
+STORE, 140121445736448, 140121496092671,
+STORE, 140121437343744, 140121445736447,
+SNULL, 140121445740543, 140121496092671,
+STORE, 140121445736448, 140121445740543,
+STORE, 140121445740544, 140121496092671,
+SNULL, 140121697554432, 140121764528127,
+STORE, 140121764528128, 140121831636991,
+STORE, 140121697554432, 140121764528127,
+SNULL, 140121764663295, 140121831636991,
+STORE, 140121764528128, 140121764663295,
+STORE, 140121764663296, 140121831636991,
+SNULL, 140121496092672, 140121630310399,
+STORE, 140121630310400, 140121697419263,
+STORE, 140121496092672, 140121630310399,
+SNULL, 140121630445567, 140121697419263,
+STORE, 140121630310400, 140121630445567,
+STORE, 140121630445568, 140121697419263,
+SNULL, 140121445740544, 140121454129151,
+STORE, 140121454129152, 140121496092671,
+STORE, 140121445740544, 140121454129151,
+SNULL, 140121454133247, 140121496092671,
+STORE, 140121454129152, 140121454133247,
+STORE, 140121454133248, 140121496092671,
+STORE, 140121026330624, 140121227657215,
+SNULL, 140121093439487, 140121227657215,
+STORE, 140121026330624, 140121093439487,
+STORE, 140121093439488, 140121227657215,
+SNULL, 140121093439488, 140121160548351,
+STORE, 140121160548352, 140121227657215,
+STORE, 140121093439488, 140121160548351,
+ERASE, 140121093439488, 140121160548351,
+SNULL, 140121563201535, 140121630310399,
+STORE, 140121496092672, 140121563201535,
+STORE, 140121563201536, 140121630310399,
+ERASE, 140121563201536, 140121630310399,
+STORE, 140120892112896, 140121093439487,
+SNULL, 140120959221759, 140121093439487,
+STORE, 140120892112896, 140120959221759,
+STORE, 140120959221760, 140121093439487,
+SNULL, 140120959221760, 140121026330623,
+STORE, 140121026330624, 140121093439487,
+STORE, 140120959221760, 140121026330623,
+ERASE, 140120959221760, 140121026330623,
+STORE, 140120757895168, 140120959221759,
+SNULL, 140121361874943, 140121412198399,
+STORE, 140121294766080, 140121361874943,
+STORE, 140121361874944, 140121412198399,
+ERASE, 140121361874944, 140121412198399,
+SNULL, 140121294901247, 140121361874943,
+STORE, 140121294766080, 140121294901247,
+STORE, 140121294901248, 140121361874943,
+STORE, 140120623677440, 140120959221759,
+SNULL, 140120690786303, 140120959221759,
+STORE, 140120623677440, 140120690786303,
+STORE, 140120690786304, 140120959221759,
+SNULL, 140120690786304, 140120757895167,
+STORE, 140120757895168, 140120959221759,
+STORE, 140120690786304, 140120757895167,
+ERASE, 140120690786304, 140120757895167,
+SNULL, 140121160683519, 140121227657215,
+STORE, 140121160548352, 140121160683519,
+STORE, 140121160683520, 140121227657215,
+SNULL, 140121974214656, 140121982607359,
+STORE, 140121982607360, 140121991000063,
+STORE, 140121974214656, 140121982607359,
+SNULL, 140121982611455, 140121991000063,
+STORE, 140121982607360, 140121982611455,
+STORE, 140121982611456, 140121991000063,
+SNULL, 140121839996928, 140121873567743,
+STORE, 140121873567744, 140121890353151,
+STORE, 140121839996928, 140121873567743,
+SNULL, 140121873571839, 140121890353151,
+STORE, 140121873567744, 140121873571839,
+STORE, 140121873571840, 140121890353151,
+SNULL, 140121873571840, 140121881960447,
+STORE, 140121881960448, 140121890353151,
+STORE, 140121873571840, 140121881960447,
+SNULL, 140121881964543, 140121890353151,
+STORE, 140121881960448, 140121881964543,
+STORE, 140121881964544, 140121890353151,
+SNULL, 140121840001023, 140121873567743,
+STORE, 140121839996928, 140121840001023,
+STORE, 140121840001024, 140121873567743,
+SNULL, 140121840001024, 140121865175039,
+STORE, 140121865175040, 140121873567743,
+STORE, 140121840001024, 140121865175039,
+SNULL, 140121865179135, 140121873567743,
+STORE, 140121865175040, 140121865179135,
+STORE, 140121865179136, 140121873567743,
+SNULL, 140121437347839, 140121445736447,
+STORE, 140121437343744, 140121437347839,
+STORE, 140121437347840, 140121445736447,
+STORE, 140121621917696, 140121630310399,
+STORE, 140121613524992, 140121630310399,
+SNULL, 140121026465791, 140121093439487,
+STORE, 140121026330624, 140121026465791,
+STORE, 140121026465792, 140121093439487,
+SNULL, 140121496227839, 140121563201535,
+STORE, 140121496092672, 140121496227839,
+STORE, 140121496227840, 140121563201535,
+SNULL, 140120757895168, 140120892112895,
+STORE, 140120892112896, 140120959221759,
+STORE, 140120757895168, 140120892112895,
+SNULL, 140120892248063, 140120959221759,
+STORE, 140120892112896, 140120892248063,
+STORE, 140120892248064, 140120959221759,
+SNULL, 140120825004031, 140120892112895,
+STORE, 140120757895168, 140120825004031,
+STORE, 140120825004032, 140120892112895,
+ERASE, 140120825004032, 140120892112895,
+SNULL, 140120623812607, 140120690786303,
+STORE, 140120623677440, 140120623812607,
+STORE, 140120623812608, 140120690786303,
+SNULL, 140120758030335, 140120825004031,
+STORE, 140120757895168, 140120758030335,
+STORE, 140120758030336, 140120825004031,
+SNULL, 140121454133248, 140121462521855,
+STORE, 140121462521856, 140121496092671,
+STORE, 140121454133248, 140121462521855,
+SNULL, 140121462525951, 140121496092671,
+STORE, 140121462521856, 140121462525951,
+STORE, 140121462525952, 140121496092671,
+STORE, 140121605132288, 140121630310399,
+SNULL, 140121605136383, 140121630310399,
+STORE, 140121605132288, 140121605136383,
+STORE, 140121605136384, 140121630310399,
+STORE, 140121596739584, 140121605132287,
+SNULL, 140121605136384, 140121621917695,
+STORE, 140121621917696, 140121630310399,
+STORE, 140121605136384, 140121621917695,
+SNULL, 140121621921791, 140121630310399,
+STORE, 140121621917696, 140121621921791,
+STORE, 140121621921792, 140121630310399,
+STORE, 140121588346880, 140121605132287,
+STORE, 140121579954176, 140121605132287,
+SNULL, 140121412202495, 140121420591103,
+STORE, 140121412198400, 140121412202495,
+STORE, 140121412202496, 140121420591103,
+SNULL, 140121974218751, 140121982607359,
+STORE, 140121974214656, 140121974218751,
+STORE, 140121974218752, 140121982607359,
+SNULL, 140121462525952, 140121479307263,
+STORE, 140121479307264, 140121496092671,
+STORE, 140121462525952, 140121479307263,
+SNULL, 140121479311359, 140121496092671,
+STORE, 140121479307264, 140121479311359,
+STORE, 140121479311360, 140121496092671,
+STORE, 140121571561472, 140121605132287,
+SNULL, 140121571565567, 140121605132287,
+STORE, 140121571561472, 140121571565567,
+STORE, 140121571565568, 140121605132287,
+STORE, 140121428951040, 140121437343743,
+SNULL, 140121428955135, 140121437343743,
+STORE, 140121428951040, 140121428955135,
+STORE, 140121428955136, 140121437343743,
+SNULL, 140121840001024, 140121856782335,
+STORE, 140121856782336, 140121865175039,
+STORE, 140121840001024, 140121856782335,
+SNULL, 140121856786431, 140121865175039,
+STORE, 140121856782336, 140121856786431,
+STORE, 140121856786432, 140121865175039,
+STORE, 140121403805696, 140121412198399,
+SNULL, 140121840001024, 140121848389631,
+STORE, 140121848389632, 140121856782335,
+STORE, 140121840001024, 140121848389631,
+SNULL, 140121848393727, 140121856782335,
+STORE, 140121848389632, 140121848393727,
+STORE, 140121848393728, 140121856782335,
+SNULL, 140121479311360, 140121487699967,
+STORE, 140121487699968, 140121496092671,
+STORE, 140121479311360, 140121487699967,
+SNULL, 140121487704063, 140121496092671,
+STORE, 140121487699968, 140121487704063,
+STORE, 140121487704064, 140121496092671,
+STORE, 140121395412992, 140121412198399,
+STORE, 140121387020288, 140121412198399,
+SNULL, 140121387024383, 140121412198399,
+STORE, 140121387020288, 140121387024383,
+STORE, 140121387024384, 140121412198399,
+SNULL, 140121605136384, 140121613524991,
+STORE, 140121613524992, 140121621917695,
+STORE, 140121605136384, 140121613524991,
+SNULL, 140121613529087, 140121621917695,
+STORE, 140121613524992, 140121613529087,
+STORE, 140121613529088, 140121621917695,
+SNULL, 140121462525952, 140121470914559,
+STORE, 140121470914560, 140121479307263,
+STORE, 140121462525952, 140121470914559,
+SNULL, 140121470918655, 140121479307263,
+STORE, 140121470914560, 140121470918655,
+STORE, 140121470918656, 140121479307263,
+STORE, 140121378627584, 140121387020287,
+SNULL, 140121378631679, 140121387020287,
+STORE, 140121378627584, 140121378631679,
+STORE, 140121378631680, 140121387020287,
+SNULL, 140121571565568, 140121596739583,
+STORE, 140121596739584, 140121605132287,
+STORE, 140121571565568, 140121596739583,
+SNULL, 140121596743679, 140121605132287,
+STORE, 140121596739584, 140121596743679,
+STORE, 140121596743680, 140121605132287,
+SNULL, 140121387024384, 140121403805695,
+STORE, 140121403805696, 140121412198399,
+STORE, 140121387024384, 140121403805695,
+SNULL, 140121403809791, 140121412198399,
+STORE, 140121403805696, 140121403809791,
+STORE, 140121403809792, 140121412198399,
+STORE, 140121370234880, 140121378627583,
+SNULL, 140121387024384, 140121395412991,
+STORE, 140121395412992, 140121403805695,
+STORE, 140121387024384, 140121395412991,
+SNULL, 140121395417087, 140121403805695,
+STORE, 140121395412992, 140121395417087,
+STORE, 140121395417088, 140121403805695,
+SNULL, 140121571565568, 140121588346879,
+STORE, 140121588346880, 140121596739583,
+STORE, 140121571565568, 140121588346879,
+SNULL, 140121588350975, 140121596739583,
+STORE, 140121588346880, 140121588350975,
+STORE, 140121588350976, 140121596739583,
+SNULL, 140121571565568, 140121579954175,
+STORE, 140121579954176, 140121588346879,
+STORE, 140121571565568, 140121579954175,
+SNULL, 140121579958271, 140121588346879,
+STORE, 140121579954176, 140121579958271,
+STORE, 140121579958272, 140121588346879,
+STORE, 140121286373376, 140121294766079,
+STORE, 140121277980672, 140121294766079,
+SNULL, 140121277980672, 140121286373375,
+STORE, 140121286373376, 140121294766079,
+STORE, 140121277980672, 140121286373375,
+SNULL, 140121286377471, 140121294766079,
+STORE, 140121286373376, 140121286377471,
+STORE, 140121286377472, 140121294766079,
+STORE, 140121269587968, 140121286373375,
+STORE, 140121261195264, 140121286373375,
+SNULL, 140121261195264, 140121269587967,
+STORE, 140121269587968, 140121286373375,
+STORE, 140121261195264, 140121269587967,
+SNULL, 140121269592063, 140121286373375,
+STORE, 140121269587968, 140121269592063,
+STORE, 140121269592064, 140121286373375,
+STORE, 140121252802560, 140121269587967,
+SNULL, 140121252806655, 140121269587967,
+STORE, 140121252802560, 140121252806655,
+STORE, 140121252806656, 140121269587967,
+STORE, 140121244409856, 140121252802559,
+STORE, 140121236017152, 140121252802559,
+SNULL, 140121236017152, 140121244409855,
+STORE, 140121244409856, 140121252802559,
+STORE, 140121236017152, 140121244409855,
+SNULL, 140121244413951, 140121252802559,
+STORE, 140121244409856, 140121244413951,
+STORE, 140121244413952, 140121252802559,
+SNULL, 140121370238975, 140121378627583,
+STORE, 140121370234880, 140121370238975,
+STORE, 140121370238976, 140121378627583,
+STORE, 140121152155648, 140121160548351,
+STORE, 140121143762944, 140121160548351,
+STORE, 140121135370240, 140121160548351,
+SNULL, 140121135374335, 140121160548351,
+STORE, 140121135370240, 140121135374335,
+STORE, 140121135374336, 140121160548351,
+STORE, 140121126977536, 140121135370239,
+STORE, 140121118584832, 140121135370239,
+STORE, 140121110192128, 140121135370239,
+SNULL, 140121110192128, 140121118584831,
+STORE, 140121118584832, 140121135370239,
+STORE, 140121110192128, 140121118584831,
+SNULL, 140121118588927, 140121135370239,
+STORE, 140121118584832, 140121118588927,
+STORE, 140121118588928, 140121135370239,
+STORE, 140121101799424, 140121118584831,
+STORE, 140121017937920, 140121026330623,
+STORE, 140121009545216, 140121026330623,
+SNULL, 140121009545216, 140121017937919,
+STORE, 140121017937920, 140121026330623,
+STORE, 140121009545216, 140121017937919,
+SNULL, 140121017942015, 140121026330623,
+STORE, 140121017937920, 140121017942015,
+STORE, 140121017942016, 140121026330623,
+SNULL, 140121269592064, 140121277980671,
+STORE, 140121277980672, 140121286373375,
+STORE, 140121269592064, 140121277980671,
+SNULL, 140121277984767, 140121286373375,
+STORE, 140121277980672, 140121277984767,
+STORE, 140121277984768, 140121286373375,
+STORE, 140121001152512, 140121017937919,
+SNULL, 140121252806656, 140121261195263,
+STORE, 140121261195264, 140121269587967,
+STORE, 140121252806656, 140121261195263,
+SNULL, 140121261199359, 140121269587967,
+STORE, 140121261195264, 140121261199359,
+STORE, 140121261199360, 140121269587967,
+SNULL, 140121135374336, 140121152155647,
+STORE, 140121152155648, 140121160548351,
+STORE, 140121135374336, 140121152155647,
+SNULL, 140121152159743, 140121160548351,
+STORE, 140121152155648, 140121152159743,
+STORE, 140121152159744, 140121160548351,
+STORE, 140120992759808, 140121017937919,
+STORE, 140120984367104, 140121017937919,
+STORE, 140120975974400, 140121017937919,
+SNULL, 140121101799424, 140121110192127,
+STORE, 140121110192128, 140121118584831,
+STORE, 140121101799424, 140121110192127,
+SNULL, 140121110196223, 140121118584831,
+STORE, 140121110192128, 140121110196223,
+STORE, 140121110196224, 140121118584831,
+SNULL, 140121118588928, 140121126977535,
+STORE, 140121126977536, 140121135370239,
+STORE, 140121118588928, 140121126977535,
+SNULL, 140121126981631, 140121135370239,
+STORE, 140121126977536, 140121126981631,
+STORE, 140121126981632, 140121135370239,
+STORE, 140120967581696, 140121017937919,
+STORE, 140120883720192, 140120892112895,
+SNULL, 140120883724287, 140120892112895,
+STORE, 140120883720192, 140120883724287,
+STORE, 140120883724288, 140120892112895,
+STORE, 140120875327488, 140120883720191,
+SNULL, 140121101803519, 140121110192127,
+STORE, 140121101799424, 140121101803519,
+STORE, 140121101803520, 140121110192127,
+SNULL, 140121135374336, 140121143762943,
+STORE, 140121143762944, 140121152155647,
+STORE, 140121135374336, 140121143762943,
+SNULL, 140121143767039, 140121152155647,
+STORE, 140121143762944, 140121143767039,
+STORE, 140121143767040, 140121152155647,
+STORE, 140120866934784, 140120883720191,
+SNULL, 140120967581696, 140120984367103,
+STORE, 140120984367104, 140121017937919,
+STORE, 140120967581696, 140120984367103,
+SNULL, 140120984371199, 140121017937919,
+STORE, 140120984367104, 140120984371199,
+STORE, 140120984371200, 140121017937919,
+STORE, 140120858542080, 140120883720191,
+SNULL, 140121236021247, 140121244409855,
+STORE, 140121236017152, 140121236021247,
+STORE, 140121236021248, 140121244409855,
+SNULL, 140120984371200, 140121009545215,
+STORE, 140121009545216, 140121017937919,
+STORE, 140120984371200, 140121009545215,
+SNULL, 140121009549311, 140121017937919,
+STORE, 140121009545216, 140121009549311,
+STORE, 140121009549312, 140121017937919,
+SNULL, 140120984371200, 140120992759807,
+STORE, 140120992759808, 140121009545215,
+STORE, 140120984371200, 140120992759807,
+SNULL, 140120992763903, 140121009545215,
+STORE, 140120992759808, 140120992763903,
+STORE, 140120992763904, 140121009545215,
+SNULL, 140120992763904, 140121001152511,
+STORE, 140121001152512, 140121009545215,
+STORE, 140120992763904, 140121001152511,
+SNULL, 140121001156607, 140121009545215,
+STORE, 140121001152512, 140121001156607,
+STORE, 140121001156608, 140121009545215,
+STORE, 140120850149376, 140120883720191,
+SNULL, 140120850153471, 140120883720191,
+STORE, 140120850149376, 140120850153471,
+STORE, 140120850153472, 140120883720191,
+SNULL, 140120967585791, 140120984367103,
+STORE, 140120967581696, 140120967585791,
+STORE, 140120967585792, 140120984367103,
+SNULL, 140120850153472, 140120866934783,
+STORE, 140120866934784, 140120883720191,
+STORE, 140120850153472, 140120866934783,
+SNULL, 140120866938879, 140120883720191,
+STORE, 140120866934784, 140120866938879,
+STORE, 140120866938880, 140120883720191,
+STORE, 140120841756672, 140120850149375,
+SNULL, 140120967585792, 140120975974399,
+STORE, 140120975974400, 140120984367103,
+STORE, 140120967585792, 140120975974399,
+SNULL, 140120975978495, 140120984367103,
+STORE, 140120975974400, 140120975978495,
+STORE, 140120975978496, 140120984367103,
+SNULL, 140120866938880, 140120875327487,
+STORE, 140120875327488, 140120883720191,
+STORE, 140120866938880, 140120875327487,
+SNULL, 140120875331583, 140120883720191,
+STORE, 140120875327488, 140120875331583,
+STORE, 140120875331584, 140120883720191,
+STORE, 140120833363968, 140120850149375,
+STORE, 140120749502464, 140120757895167,
+STORE, 140120741109760, 140120757895167,
+STORE, 140120732717056, 140120757895167,
+STORE, 140120724324352, 140120757895167,
+SNULL, 140120724324352, 140120732717055,
+STORE, 140120732717056, 140120757895167,
+STORE, 140120724324352, 140120732717055,
+SNULL, 140120732721151, 140120757895167,
+STORE, 140120732717056, 140120732721151,
+STORE, 140120732721152, 140120757895167,
+STORE, 140120715931648, 140120732717055,
+SNULL, 140120715935743, 140120732717055,
+STORE, 140120715931648, 140120715935743,
+STORE, 140120715935744, 140120732717055,
+SNULL, 140120850153472, 140120858542079,
+STORE, 140120858542080, 140120866934783,
+STORE, 140120850153472, 140120858542079,
+SNULL, 140120858546175, 140120866934783,
+STORE, 140120858542080, 140120858546175,
+STORE, 140120858546176, 140120866934783,
+STORE, 140120707538944, 140120715931647,
+SNULL, 140120707543039, 140120715931647,
+STORE, 140120707538944, 140120707543039,
+STORE, 140120707543040, 140120715931647,
+SNULL, 140120833368063, 140120850149375,
+STORE, 140120833363968, 140120833368063,
+STORE, 140120833368064, 140120850149375,
+SNULL, 140120833368064, 140120841756671,
+STORE, 140120841756672, 140120850149375,
+STORE, 140120833368064, 140120841756671,
+SNULL, 140120841760767, 140120850149375,
+STORE, 140120841756672, 140120841760767,
+STORE, 140120841760768, 140120850149375,
+STORE, 140120699146240, 140120707538943,
+SNULL, 140120715935744, 140120724324351,
+STORE, 140120724324352, 140120732717055,
+STORE, 140120715935744, 140120724324351,
+SNULL, 140120724328447, 140120732717055,
+STORE, 140120724324352, 140120724328447,
+STORE, 140120724328448, 140120732717055,
+SNULL, 140120732721152, 140120741109759,
+STORE, 140120741109760, 140120757895167,
+STORE, 140120732721152, 140120741109759,
+SNULL, 140120741113855, 140120757895167,
+STORE, 140120741109760, 140120741113855,
+STORE, 140120741113856, 140120757895167,
+SNULL, 140120741113856, 140120749502463,
+STORE, 140120749502464, 140120757895167,
+STORE, 140120741113856, 140120749502463,
+SNULL, 140120749506559, 140120757895167,
+STORE, 140120749502464, 140120749506559,
+STORE, 140120749506560, 140120757895167,
+SNULL, 140120699150335, 140120707538943,
+STORE, 140120699146240, 140120699150335,
+STORE, 140120699150336, 140120707538943,
+STORE, 140122446557184, 140122446585855,
+STORE, 140122368999424, 140122371190783,
+SNULL, 140122368999424, 140122369089535,
+STORE, 140122369089536, 140122371190783,
+STORE, 140122368999424, 140122369089535,
+SNULL, 140122371182591, 140122371190783,
+STORE, 140122369089536, 140122371182591,
+STORE, 140122371182592, 140122371190783,
+ERASE, 140122371182592, 140122371190783,
+STORE, 140122371182592, 140122371190783,
+SNULL, 140122371186687, 140122371190783,
+STORE, 140122371182592, 140122371186687,
+STORE, 140122371186688, 140122371190783,
+ERASE, 140122446557184, 140122446585855,
+ERASE, 140121445736448, 140121445740543,
+ERASE, 140121445740544, 140121454129151,
+ERASE, 140121621917696, 140121621921791,
+ERASE, 140121621921792, 140121630310399,
+ERASE, 140121579954176, 140121579958271,
+ERASE, 140121579958272, 140121588346879,
+ERASE, 140121261195264, 140121261199359,
+ERASE, 140121261199360, 140121269587967,
+ERASE, 140121454129152, 140121454133247,
+ERASE, 140121454133248, 140121462521855,
+ERASE, 140121588346880, 140121588350975,
+ERASE, 140121588350976, 140121596739583,
+ERASE, 140121135370240, 140121135374335,
+ERASE, 140121135374336, 140121143762943,
+ERASE, 140121881960448, 140121881964543,
+ERASE, 140121881964544, 140121890353151,
+ERASE, 140121428951040, 140121428955135,
+ERASE, 140121428955136, 140121437343743,
+ERASE, 140121387020288, 140121387024383,
+ERASE, 140121387024384, 140121395412991,
+ERASE, 140121487699968, 140121487704063,
+ERASE, 140121487704064, 140121496092671,
+ERASE, 140121437343744, 140121437347839,
+ERASE, 140121437347840, 140121445736447,
+ERASE, 140121613524992, 140121613529087,
+ERASE, 140121613529088, 140121621917695,
+ERASE, 140121856782336, 140121856786431,
+ERASE, 140121856786432, 140121865175039,
+ERASE, 140121252802560, 140121252806655,
+ERASE, 140121252806656, 140121261195263,
+ERASE, 140121839996928, 140121840001023,
+ERASE, 140121840001024, 140121848389631,
+ERASE, 140121596739584, 140121596743679,
+ERASE, 140121596743680, 140121605132287,
+ERASE, 140121009545216, 140121009549311,
+ERASE, 140121009549312, 140121017937919,
+ERASE, 140120724324352, 140120724328447,
+ERASE, 140120724328448, 140120732717055,
+ERASE, 140120883720192, 140120883724287,
+ERASE, 140120883724288, 140120892112895,
+ERASE, 140121982607360, 140121982611455,
+ERASE, 140121982611456, 140121991000063,
+ERASE, 140121571561472, 140121571565567,
+ERASE, 140121571565568, 140121579954175,
+ERASE, 140121286373376, 140121286377471,
+ERASE, 140121286377472, 140121294766079,
+ERASE, 140120875327488, 140120875331583,
+ERASE, 140120875331584, 140120883720191,
+ERASE, 140121848389632, 140121848393727,
+ERASE, 140121848393728, 140121856782335,
+ERASE, 140121370234880, 140121370238975,
+ERASE, 140121370238976, 140121378627583,
+ERASE, 140121143762944, 140121143767039,
+ERASE, 140121143767040, 140121152155647,
+ERASE, 140121118584832, 140121118588927,
+ERASE, 140121118588928, 140121126977535,
+ERASE, 140120866934784, 140120866938879,
+ERASE, 140120866938880, 140120875327487,
+ERASE, 140120741109760, 140120741113855,
+ERASE, 140120741113856, 140120749502463,
+ERASE, 140121865175040, 140121865179135,
+ERASE, 140121865179136, 140121873567743,
+ERASE, 140121403805696, 140121403809791,
+ERASE, 140121403809792, 140121412198399,
+ERASE, 140121236017152, 140121236021247,
+ERASE, 140121236021248, 140121244409855,
+ERASE, 140120732717056, 140120732721151,
+ERASE, 140120732721152, 140120741109759,
+ERASE, 140121017937920, 140121017942015,
+ERASE, 140121017942016, 140121026330623,
+ERASE, 140121873567744, 140121873571839,
+ERASE, 140121873571840, 140121881960447,
+ERASE, 140121470914560, 140121470918655,
+ERASE, 140121470918656, 140121479307263,
+ERASE, 140121126977536, 140121126981631,
+ERASE, 140121126981632, 140121135370239,
+ERASE, 140120850149376, 140120850153471,
+ERASE, 140120850153472, 140120858542079,
+ERASE, 140120707538944, 140120707543039,
+ERASE, 140120707543040, 140120715931647,
+ERASE, 140121479307264, 140121479311359,
+ERASE, 140121479311360, 140121487699967,
+ERASE, 140120967581696, 140120967585791,
+ERASE, 140120967585792, 140120975974399,
+ERASE, 140120841756672, 140120841760767,
+ERASE, 140120841760768, 140120850149375,
+ERASE, 140121412198400, 140121412202495,
+ERASE, 140121412202496, 140121420591103,
+ERASE, 140122158788608, 140122158792703,
+ERASE, 140122158792704, 140122167181311,
+ERASE, 140122142003200, 140122142007295,
+ERASE, 140122142007296, 140122150395903,
+ERASE, 140121101799424, 140121101803519,
+ERASE, 140121101803520, 140121110192127,
+ERASE, 140120858542080, 140120858546175,
+ERASE, 140120858546176, 140120866934783,
+ERASE, 140120833363968, 140120833368063,
+ERASE, 140120833368064, 140120841756671,
+ERASE, 140121277980672, 140121277984767,
+ERASE, 140121277984768, 140121286373375,
+ERASE, 140121001152512, 140121001156607,
+ERASE, 140121001156608, 140121009545215,
+ERASE, 140120749502464, 140120749506559,
+ERASE, 140120749506560, 140120757895167,
+ERASE, 140121605132288, 140121605136383,
+ERASE, 140121605136384, 140121613524991,
+ERASE, 140121378627584, 140121378631679,
+ERASE, 140121378631680, 140121387020287,
+ERASE, 140121110192128, 140121110196223,
+ERASE, 140121110196224, 140121118584831,
+ERASE, 140121462521856, 140121462525951,
+ERASE, 140121462525952, 140121470914559,
+ERASE, 140121395412992, 140121395417087,
+ERASE, 140121395417088, 140121403805695,
+ERASE, 140121152155648, 140121152159743,
+ERASE, 140121152159744, 140121160548351,
+ERASE, 140120992759808, 140120992763903,
+ERASE, 140120992763904, 140121001152511,
+ERASE, 140122387976192, 140122387980287,
+ERASE, 140122387980288, 140122396368895,
+ERASE, 140121890353152, 140121890357247,
+ERASE, 140121890357248, 140121898745855,
+ERASE, 140121269587968, 140121269592063,
+ERASE, 140121269592064, 140121277980671,
+ };
+ unsigned long set37[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140722404016128, 140737488351231,
+SNULL, 140722404020223, 140737488351231,
+STORE, 140722404016128, 140722404020223,
+STORE, 140722403885056, 140722404020223,
+STORE, 94637010001920, 94637012254719,
+SNULL, 94637010132991, 94637012254719,
+STORE, 94637010001920, 94637010132991,
+STORE, 94637010132992, 94637012254719,
+ERASE, 94637010132992, 94637012254719,
+STORE, 94637012226048, 94637012234239,
+STORE, 94637012234240, 94637012254719,
+STORE, 139760240594944, 139760242847743,
+SNULL, 139760240738303, 139760242847743,
+STORE, 139760240594944, 139760240738303,
+STORE, 139760240738304, 139760242847743,
+ERASE, 139760240738304, 139760242847743,
+STORE, 139760242835456, 139760242843647,
+STORE, 139760242843648, 139760242847743,
+STORE, 140722405232640, 140722405236735,
+STORE, 140722405220352, 140722405232639,
+STORE, 139760242806784, 139760242835455,
+STORE, 139760242798592, 139760242806783,
+STORE, 139760238379008, 139760240594943,
+SNULL, 139760238379008, 139760238477311,
+STORE, 139760238477312, 139760240594943,
+STORE, 139760238379008, 139760238477311,
+SNULL, 139760240570367, 139760240594943,
+STORE, 139760238477312, 139760240570367,
+STORE, 139760240570368, 139760240594943,
+SNULL, 139760240570368, 139760240578559,
+STORE, 139760240578560, 139760240594943,
+STORE, 139760240570368, 139760240578559,
+ERASE, 139760240570368, 139760240578559,
+STORE, 139760240570368, 139760240578559,
+ERASE, 139760240578560, 139760240594943,
+STORE, 139760240578560, 139760240594943,
+STORE, 139760234582016, 139760238379007,
+SNULL, 139760234582016, 139760236240895,
+STORE, 139760236240896, 139760238379007,
+STORE, 139760234582016, 139760236240895,
+SNULL, 139760238338047, 139760238379007,
+STORE, 139760236240896, 139760238338047,
+STORE, 139760238338048, 139760238379007,
+SNULL, 139760238338048, 139760238362623,
+STORE, 139760238362624, 139760238379007,
+STORE, 139760238338048, 139760238362623,
+ERASE, 139760238338048, 139760238362623,
+STORE, 139760238338048, 139760238362623,
+ERASE, 139760238362624, 139760238379007,
+STORE, 139760238362624, 139760238379007,
+STORE, 139760242790400, 139760242806783,
+SNULL, 139760238354431, 139760238362623,
+STORE, 139760238338048, 139760238354431,
+STORE, 139760238354432, 139760238362623,
+SNULL, 139760240574463, 139760240578559,
+STORE, 139760240570368, 139760240574463,
+STORE, 139760240574464, 139760240578559,
+SNULL, 94637012230143, 94637012234239,
+STORE, 94637012226048, 94637012230143,
+STORE, 94637012230144, 94637012234239,
+SNULL, 139760242839551, 139760242843647,
+STORE, 139760242835456, 139760242839551,
+STORE, 139760242839552, 139760242843647,
+ERASE, 139760242806784, 139760242835455,
+STORE, 94637033324544, 94637033459711,
+STORE, 139760226189312, 139760234582015,
+SNULL, 139760226193407, 139760234582015,
+STORE, 139760226189312, 139760226193407,
+STORE, 139760226193408, 139760234582015,
+STORE, 139760217796608, 139760226189311,
+STORE, 139760083578880, 139760217796607,
+SNULL, 139760083578880, 139760114860031,
+STORE, 139760114860032, 139760217796607,
+STORE, 139760083578880, 139760114860031,
+ERASE, 139760083578880, 139760114860031,
+SNULL, 139760181968895, 139760217796607,
+STORE, 139760114860032, 139760181968895,
+STORE, 139760181968896, 139760217796607,
+ERASE, 139760181968896, 139760217796607,
+SNULL, 139760114995199, 139760181968895,
+STORE, 139760114860032, 139760114995199,
+STORE, 139760114995200, 139760181968895,
+SNULL, 139760217800703, 139760226189311,
+STORE, 139760217796608, 139760217800703,
+STORE, 139760217800704, 139760226189311,
+STORE, 139760209403904, 139760217796607,
+SNULL, 139760209407999, 139760217796607,
+STORE, 139760209403904, 139760209407999,
+STORE, 139760209408000, 139760217796607,
+STORE, 139760201011200, 139760209403903,
+SNULL, 139760201015295, 139760209403903,
+STORE, 139760201011200, 139760201015295,
+STORE, 139760201015296, 139760209403903,
+STORE, 139760192618496, 139760201011199,
+SNULL, 139760192622591, 139760201011199,
+STORE, 139760192618496, 139760192622591,
+STORE, 139760192622592, 139760201011199,
+STORE, 139760184225792, 139760192618495,
+STORE, 139759980642304, 139760114860031,
+STORE, 139759972249600, 139759980642303,
+STORE, 139759963856896, 139759980642303,
+STORE, 139759955464192, 139759980642303,
+STORE, 139759888355328, 139759955464191,
+SNULL, 139760047751167, 139760114860031,
+STORE, 139759980642304, 139760047751167,
+STORE, 139760047751168, 139760114860031,
+ERASE, 139760047751168, 139760114860031,
+SNULL, 139759980777471, 139760047751167,
+STORE, 139759980642304, 139759980777471,
+STORE, 139759980777472, 139760047751167,
+STORE, 139759980777472, 139760114860031,
+SNULL, 139759980777472, 139760047751167,
+STORE, 139760047751168, 139760114860031,
+STORE, 139759980777472, 139760047751167,
+SNULL, 139760047886335, 139760114860031,
+STORE, 139760047751168, 139760047886335,
+STORE, 139760047886336, 139760114860031,
+STORE, 139759821246464, 139759955464191,
+SNULL, 139759821246464, 139759888355327,
+STORE, 139759888355328, 139759955464191,
+STORE, 139759821246464, 139759888355327,
+ERASE, 139759821246464, 139759888355327,
+ERASE, 139759888355328, 139759955464191,
+ };
+ unsigned long set38[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140730666221568, 140737488351231,
+SNULL, 140730666225663, 140737488351231,
+STORE, 140730666221568, 140730666225663,
+STORE, 140730666090496, 140730666225663,
+STORE, 94177584803840, 94177587056639,
+SNULL, 94177584934911, 94177587056639,
+STORE, 94177584803840, 94177584934911,
+STORE, 94177584934912, 94177587056639,
+ERASE, 94177584934912, 94177587056639,
+STORE, 94177587027968, 94177587036159,
+STORE, 94177587036160, 94177587056639,
+STORE, 140614382714880, 140614384967679,
+SNULL, 140614382858239, 140614384967679,
+STORE, 140614382714880, 140614382858239,
+STORE, 140614382858240, 140614384967679,
+ERASE, 140614382858240, 140614384967679,
+STORE, 140614384955392, 140614384963583,
+STORE, 140614384963584, 140614384967679,
+STORE, 140730666315776, 140730666319871,
+STORE, 140730666303488, 140730666315775,
+STORE, 140614384926720, 140614384955391,
+STORE, 140614384918528, 140614384926719,
+STORE, 140614380498944, 140614382714879,
+SNULL, 140614380498944, 140614380597247,
+STORE, 140614380597248, 140614382714879,
+STORE, 140614380498944, 140614380597247,
+SNULL, 140614382690303, 140614382714879,
+STORE, 140614380597248, 140614382690303,
+STORE, 140614382690304, 140614382714879,
+SNULL, 140614382690304, 140614382698495,
+STORE, 140614382698496, 140614382714879,
+STORE, 140614382690304, 140614382698495,
+ERASE, 140614382690304, 140614382698495,
+STORE, 140614382690304, 140614382698495,
+ERASE, 140614382698496, 140614382714879,
+STORE, 140614382698496, 140614382714879,
+STORE, 140614376701952, 140614380498943,
+SNULL, 140614376701952, 140614378360831,
+STORE, 140614378360832, 140614380498943,
+STORE, 140614376701952, 140614378360831,
+SNULL, 140614380457983, 140614380498943,
+STORE, 140614378360832, 140614380457983,
+STORE, 140614380457984, 140614380498943,
+SNULL, 140614380457984, 140614380482559,
+STORE, 140614380482560, 140614380498943,
+STORE, 140614380457984, 140614380482559,
+ERASE, 140614380457984, 140614380482559,
+STORE, 140614380457984, 140614380482559,
+ERASE, 140614380482560, 140614380498943,
+STORE, 140614380482560, 140614380498943,
+STORE, 140614384910336, 140614384926719,
+SNULL, 140614380474367, 140614380482559,
+STORE, 140614380457984, 140614380474367,
+STORE, 140614380474368, 140614380482559,
+SNULL, 140614382694399, 140614382698495,
+STORE, 140614382690304, 140614382694399,
+STORE, 140614382694400, 140614382698495,
+SNULL, 94177587032063, 94177587036159,
+STORE, 94177587027968, 94177587032063,
+STORE, 94177587032064, 94177587036159,
+SNULL, 140614384959487, 140614384963583,
+STORE, 140614384955392, 140614384959487,
+STORE, 140614384959488, 140614384963583,
+ERASE, 140614384926720, 140614384955391,
+STORE, 94177619791872, 94177619927039,
+STORE, 140614368309248, 140614376701951,
+SNULL, 140614368313343, 140614376701951,
+STORE, 140614368309248, 140614368313343,
+STORE, 140614368313344, 140614376701951,
+STORE, 140614359916544, 140614368309247,
+STORE, 140614225698816, 140614359916543,
+SNULL, 140614225698816, 140614276481023,
+STORE, 140614276481024, 140614359916543,
+STORE, 140614225698816, 140614276481023,
+ERASE, 140614225698816, 140614276481023,
+SNULL, 140614343589887, 140614359916543,
+STORE, 140614276481024, 140614343589887,
+STORE, 140614343589888, 140614359916543,
+ERASE, 140614343589888, 140614359916543,
+SNULL, 140614276616191, 140614343589887,
+STORE, 140614276481024, 140614276616191,
+STORE, 140614276616192, 140614343589887,
+SNULL, 140614359920639, 140614368309247,
+STORE, 140614359916544, 140614359920639,
+STORE, 140614359920640, 140614368309247,
+STORE, 140614351523840, 140614359916543,
+SNULL, 140614351527935, 140614359916543,
+STORE, 140614351523840, 140614351527935,
+STORE, 140614351527936, 140614359916543,
+STORE, 140614268088320, 140614276481023,
+SNULL, 140614268092415, 140614276481023,
+STORE, 140614268088320, 140614268092415,
+STORE, 140614268092416, 140614276481023,
+STORE, 140614259695616, 140614268088319,
+SNULL, 140614259699711, 140614268088319,
+STORE, 140614259695616, 140614259699711,
+STORE, 140614259699712, 140614268088319,
+STORE, 140614251302912, 140614259695615,
+STORE, 140614242910208, 140614259695615,
+STORE, 140614108692480, 140614242910207,
+SNULL, 140614108692480, 140614142263295,
+STORE, 140614142263296, 140614242910207,
+STORE, 140614108692480, 140614142263295,
+ERASE, 140614108692480, 140614142263295,
+STORE, 140614133870592, 140614142263295,
+STORE, 140613999652864, 140614133870591,
+SNULL, 140613999652864, 140614008045567,
+STORE, 140614008045568, 140614133870591,
+STORE, 140613999652864, 140614008045567,
+ERASE, 140613999652864, 140614008045567,
+STORE, 140613999652864, 140614008045567,
+STORE, 140613865435136, 140613999652863,
+SNULL, 140613865435136, 140613873827839,
+STORE, 140613873827840, 140613999652863,
+STORE, 140613865435136, 140613873827839,
+ERASE, 140613865435136, 140613873827839,
+SNULL, 140614209372159, 140614242910207,
+STORE, 140614142263296, 140614209372159,
+STORE, 140614209372160, 140614242910207,
+ERASE, 140614209372160, 140614242910207,
+SNULL, 140614142398463, 140614209372159,
+STORE, 140614142263296, 140614142398463,
+STORE, 140614142398464, 140614209372159,
+SNULL, 140614075154431, 140614133870591,
+STORE, 140614008045568, 140614075154431,
+STORE, 140614075154432, 140614133870591,
+ERASE, 140614075154432, 140614133870591,
+SNULL, 140614008180735, 140614075154431,
+STORE, 140614008045568, 140614008180735,
+STORE, 140614008180736, 140614075154431,
+SNULL, 140613940936703, 140613999652863,
+STORE, 140613873827840, 140613940936703,
+STORE, 140613940936704, 140613999652863,
+ERASE, 140613940936704, 140613999652863,
+SNULL, 140614242914303, 140614259695615,
+STORE, 140614242910208, 140614242914303,
+STORE, 140614242914304, 140614259695615,
+STORE, 140613739610112, 140613940936703,
+STORE, 140614234517504, 140614242910207,
+SNULL, 140614242914304, 140614251302911,
+STORE, 140614251302912, 140614259695615,
+STORE, 140614242914304, 140614251302911,
+SNULL, 140614251307007, 140614259695615,
+STORE, 140614251302912, 140614251307007,
+STORE, 140614251307008, 140614259695615,
+SNULL, 140613739610112, 140613873827839,
+STORE, 140613873827840, 140613940936703,
+STORE, 140613739610112, 140613873827839,
+SNULL, 140613873963007, 140613940936703,
+STORE, 140613873827840, 140613873963007,
+STORE, 140613873963008, 140613940936703,
+SNULL, 140614133874687, 140614142263295,
+STORE, 140614133870592, 140614133874687,
+STORE, 140614133874688, 140614142263295,
+SNULL, 140613806718975, 140613873827839,
+STORE, 140613739610112, 140613806718975,
+STORE, 140613806718976, 140613873827839,
+ERASE, 140613806718976, 140613873827839,
+STORE, 140614226124800, 140614242910207,
+SNULL, 140613739745279, 140613806718975,
+STORE, 140613739610112, 140613739745279,
+STORE, 140613739745280, 140613806718975,
+SNULL, 140613999656959, 140614008045567,
+STORE, 140613999652864, 140613999656959,
+STORE, 140613999656960, 140614008045567,
+SNULL, 140614226124800, 140614234517503,
+STORE, 140614234517504, 140614242910207,
+STORE, 140614226124800, 140614234517503,
+SNULL, 140614234521599, 140614242910207,
+STORE, 140614234517504, 140614234521599,
+STORE, 140614234521600, 140614242910207,
+STORE, 140614217732096, 140614234517503,
+STORE, 140614125477888, 140614133870591,
+SNULL, 140614125481983, 140614133870591,
+STORE, 140614125477888, 140614125481983,
+STORE, 140614125481984, 140614133870591,
+STORE, 140614117085184, 140614125477887,
+SNULL, 140614217736191, 140614234517503,
+STORE, 140614217732096, 140614217736191,
+STORE, 140614217736192, 140614234517503,
+SNULL, 140614117089279, 140614125477887,
+STORE, 140614117085184, 140614117089279,
+STORE, 140614117089280, 140614125477887,
+SNULL, 140614217736192, 140614226124799,
+STORE, 140614226124800, 140614234517503,
+STORE, 140614217736192, 140614226124799,
+SNULL, 140614226128895, 140614234517503,
+STORE, 140614226124800, 140614226128895,
+STORE, 140614226128896, 140614234517503,
+STORE, 140614108692480, 140614117085183,
+STORE, 140614100299776, 140614117085183,
+STORE, 140614091907072, 140614117085183,
+SNULL, 140614091907072, 140614108692479,
+STORE, 140614108692480, 140614117085183,
+STORE, 140614091907072, 140614108692479,
+SNULL, 140614108696575, 140614117085183,
+STORE, 140614108692480, 140614108696575,
+STORE, 140614108696576, 140614117085183,
+SNULL, 140614091907072, 140614100299775,
+STORE, 140614100299776, 140614108692479,
+STORE, 140614091907072, 140614100299775,
+SNULL, 140614100303871, 140614108692479,
+STORE, 140614100299776, 140614100303871,
+STORE, 140614100303872, 140614108692479,
+STORE, 140614083514368, 140614100299775,
+SNULL, 140614083518463, 140614100299775,
+STORE, 140614083514368, 140614083518463,
+STORE, 140614083518464, 140614100299775,
+STORE, 140613991260160, 140613999652863,
+SNULL, 140614083518464, 140614091907071,
+STORE, 140614091907072, 140614100299775,
+STORE, 140614083518464, 140614091907071,
+SNULL, 140614091911167, 140614100299775,
+STORE, 140614091907072, 140614091911167,
+STORE, 140614091911168, 140614100299775,
+SNULL, 140613991264255, 140613999652863,
+STORE, 140613991260160, 140613991264255,
+STORE, 140613991264256, 140613999652863,
+STORE, 140613982867456, 140613991260159,
+SNULL, 140613982871551, 140613991260159,
+STORE, 140613982867456, 140613982871551,
+STORE, 140613982871552, 140613991260159,
+STORE, 140613974474752, 140613982867455,
+SNULL, 140613974478847, 140613982867455,
+STORE, 140613974474752, 140613974478847,
+STORE, 140613974478848, 140613982867455,
+STORE, 140613966082048, 140613974474751,
+STORE, 140613739745280, 140613873827839,
+SNULL, 140613739745280, 140613806718975,
+STORE, 140613806718976, 140613873827839,
+STORE, 140613739745280, 140613806718975,
+SNULL, 140613806854143, 140613873827839,
+STORE, 140613806718976, 140613806854143,
+STORE, 140613806854144, 140613873827839,
+SNULL, 140613966086143, 140613974474751,
+STORE, 140613966082048, 140613966086143,
+STORE, 140613966086144, 140613974474751,
+STORE, 140613957689344, 140613966082047,
+STORE, 140613605392384, 140613739610111,
+STORE, 140613949296640, 140613966082047,
+STORE, 140613596999680, 140613605392383,
+STORE, 140613529890816, 140613596999679,
+STORE, 140613521498112, 140613529890815,
+STORE, 140613513105408, 140613529890815,
+STORE, 140613378887680, 140613513105407,
+SNULL, 140613378887680, 140613404065791,
+STORE, 140613404065792, 140613513105407,
+STORE, 140613378887680, 140613404065791,
+ERASE, 140613378887680, 140613404065791,
+STORE, 140613395673088, 140613404065791,
+STORE, 140613261455360, 140613395673087,
+SNULL, 140613261455360, 140613269848063,
+STORE, 140613269848064, 140613395673087,
+STORE, 140613261455360, 140613269848063,
+ERASE, 140613261455360, 140613269848063,
+STORE, 140613261455360, 140613269848063,
+STORE, 140613253062656, 140613269848063,
+STORE, 140613118844928, 140613253062655,
+STORE, 140613110452224, 140613118844927,
+SNULL, 140613118844928, 140613135630335,
+STORE, 140613135630336, 140613253062655,
+STORE, 140613118844928, 140613135630335,
+ERASE, 140613118844928, 140613135630335,
+STORE, 140613127237632, 140613135630335,
+STORE, 140613110452224, 140613135630335,
+STORE, 140612976234496, 140613110452223,
+STORE, 140612967841792, 140612976234495,
+STORE, 140612833624064, 140612967841791,
+STORE, 140612825231360, 140612833624063,
+STORE, 140612816838656, 140612833624063,
+STORE, 140612682620928, 140612816838655,
+STORE, 140612674228224, 140612682620927,
+SNULL, 140612682620928, 140612732977151,
+STORE, 140612732977152, 140612816838655,
+STORE, 140612682620928, 140612732977151,
+ERASE, 140612682620928, 140612732977151,
+SNULL, 140613672501247, 140613739610111,
+STORE, 140613605392384, 140613672501247,
+STORE, 140613672501248, 140613739610111,
+ERASE, 140613672501248, 140613739610111,
+SNULL, 140613605527551, 140613672501247,
+STORE, 140613605392384, 140613605527551,
+STORE, 140613605527552, 140613672501247,
+ERASE, 140613529890816, 140613596999679,
+STORE, 140612540010496, 140612674228223,
+SNULL, 140612540010496, 140612598759423,
+STORE, 140612598759424, 140612674228223,
+STORE, 140612540010496, 140612598759423,
+ERASE, 140612540010496, 140612598759423,
+SNULL, 140613471174655, 140613513105407,
+STORE, 140613404065792, 140613471174655,
+STORE, 140613471174656, 140613513105407,
+ERASE, 140613471174656, 140613513105407,
+SNULL, 140613404200959, 140613471174655,
+STORE, 140613404065792, 140613404200959,
+STORE, 140613404200960, 140613471174655,
+SNULL, 140613336956927, 140613395673087,
+STORE, 140613269848064, 140613336956927,
+STORE, 140613336956928, 140613395673087,
+ERASE, 140613336956928, 140613395673087,
+SNULL, 140612833624064, 140612867194879,
+STORE, 140612867194880, 140612967841791,
+STORE, 140612833624064, 140612867194879,
+ERASE, 140612833624064, 140612867194879,
+SNULL, 140612976234496, 140613001412607,
+STORE, 140613001412608, 140613110452223,
+STORE, 140612976234496, 140613001412607,
+ERASE, 140612976234496, 140613001412607,
+SNULL, 140613202739199, 140613253062655,
+STORE, 140613135630336, 140613202739199,
+STORE, 140613202739200, 140613253062655,
+ERASE, 140613202739200, 140613253062655,
+SNULL, 140613135765503, 140613202739199,
+STORE, 140613135630336, 140613135765503,
+STORE, 140613135765504, 140613202739199,
+SNULL, 140612816842751, 140612833624063,
+STORE, 140612816838656, 140612816842751,
+STORE, 140612816842752, 140612833624063,
+SNULL, 140613110456319, 140613135630335,
+STORE, 140613110452224, 140613110456319,
+STORE, 140613110456320, 140613135630335,
+SNULL, 140613949300735, 140613966082047,
+STORE, 140613949296640, 140613949300735,
+STORE, 140613949300736, 140613966082047,
+SNULL, 140613110456320, 140613118844927,
+STORE, 140613118844928, 140613135630335,
+STORE, 140613110456320, 140613118844927,
+SNULL, 140613118849023, 140613135630335,
+STORE, 140613118844928, 140613118849023,
+STORE, 140613118849024, 140613135630335,
+SNULL, 140612800086015, 140612816838655,
+STORE, 140612732977152, 140612800086015,
+STORE, 140612800086016, 140612816838655,
+ERASE, 140612800086016, 140612816838655,
+SNULL, 140613253062656, 140613261455359,
+STORE, 140613261455360, 140613269848063,
+STORE, 140613253062656, 140613261455359,
+SNULL, 140613261459455, 140613269848063,
+STORE, 140613261455360, 140613261459455,
+STORE, 140613261459456, 140613269848063,
+SNULL, 140612674232319, 140612682620927,
+STORE, 140612674228224, 140612674232319,
+STORE, 140612674232320, 140612682620927,
+STORE, 140613731217408, 140613739610111,
+STORE, 140613722824704, 140613739610111,
+SNULL, 140613949300736, 140613957689343,
+STORE, 140613957689344, 140613966082047,
+STORE, 140613949300736, 140613957689343,
+SNULL, 140613957693439, 140613966082047,
+STORE, 140613957689344, 140613957693439,
+STORE, 140613957693440, 140613966082047,
+STORE, 140612464541696, 140612674228223,
+SNULL, 140612531650559, 140612674228223,
+STORE, 140612464541696, 140612531650559,
+STORE, 140612531650560, 140612674228223,
+SNULL, 140612531650560, 140612598759423,
+STORE, 140612598759424, 140612674228223,
+STORE, 140612531650560, 140612598759423,
+ERASE, 140612531650560, 140612598759423,
+SNULL, 140612665868287, 140612674228223,
+STORE, 140612598759424, 140612665868287,
+STORE, 140612665868288, 140612674228223,
+ERASE, 140612665868288, 140612674228223,
+SNULL, 140613269983231, 140613336956927,
+STORE, 140613269848064, 140613269983231,
+STORE, 140613269983232, 140613336956927,
+SNULL, 140612934303743, 140612967841791,
+STORE, 140612867194880, 140612934303743,
+STORE, 140612934303744, 140612967841791,
+ERASE, 140612934303744, 140612967841791,
+SNULL, 140613068521471, 140613110452223,
+STORE, 140613001412608, 140613068521471,
+STORE, 140613068521472, 140613110452223,
+ERASE, 140613068521472, 140613110452223,
+STORE, 140613714432000, 140613739610111,
+SNULL, 140613001547775, 140613068521471,
+STORE, 140613001412608, 140613001547775,
+STORE, 140613001547776, 140613068521471,
+SNULL, 140612733112319, 140612800086015,
+STORE, 140612732977152, 140612733112319,
+STORE, 140612733112320, 140612800086015,
+SNULL, 140613513109503, 140613529890815,
+STORE, 140613513105408, 140613513109503,
+STORE, 140613513109504, 140613529890815,
+STORE, 140613706039296, 140613739610111,
+STORE, 140613697646592, 140613739610111,
+STORE, 140613689253888, 140613739610111,
+SNULL, 140613689257983, 140613739610111,
+STORE, 140613689253888, 140613689257983,
+STORE, 140613689257984, 140613739610111,
+SNULL, 140613253066751, 140613261455359,
+STORE, 140613253062656, 140613253066751,
+STORE, 140613253066752, 140613261455359,
+STORE, 140613680861184, 140613689253887,
+STORE, 140613588606976, 140613605392383,
+SNULL, 140613689257984, 140613731217407,
+STORE, 140613731217408, 140613739610111,
+STORE, 140613689257984, 140613731217407,
+SNULL, 140613731221503, 140613739610111,
+STORE, 140613731217408, 140613731221503,
+STORE, 140613731221504, 140613739610111,
+STORE, 140613580214272, 140613605392383,
+SNULL, 140612464676863, 140612531650559,
+STORE, 140612464541696, 140612464676863,
+STORE, 140612464676864, 140612531650559,
+SNULL, 140612598894591, 140612665868287,
+STORE, 140612598759424, 140612598894591,
+STORE, 140612598894592, 140612665868287,
+SNULL, 140612867330047, 140612934303743,
+STORE, 140612867194880, 140612867330047,
+STORE, 140612867330048, 140612934303743,
+STORE, 140613571821568, 140613605392383,
+SNULL, 140613571825663, 140613605392383,
+STORE, 140613571821568, 140613571825663,
+STORE, 140613571825664, 140613605392383,
+SNULL, 140613689257984, 140613722824703,
+STORE, 140613722824704, 140613731217407,
+STORE, 140613689257984, 140613722824703,
+SNULL, 140613722828799, 140613731217407,
+STORE, 140613722824704, 140613722828799,
+STORE, 140613722828800, 140613731217407,
+SNULL, 140613689257984, 140613714431999,
+STORE, 140613714432000, 140613722824703,
+STORE, 140613689257984, 140613714431999,
+SNULL, 140613714436095, 140613722824703,
+STORE, 140613714432000, 140613714436095,
+STORE, 140613714436096, 140613722824703,
+SNULL, 140612816842752, 140612825231359,
+STORE, 140612825231360, 140612833624063,
+STORE, 140612816842752, 140612825231359,
+SNULL, 140612825235455, 140612833624063,
+STORE, 140612825231360, 140612825235455,
+STORE, 140612825235456, 140612833624063,
+SNULL, 140613395677183, 140613404065791,
+STORE, 140613395673088, 140613395677183,
+STORE, 140613395677184, 140613404065791,
+SNULL, 140613689257984, 140613706039295,
+STORE, 140613706039296, 140613714431999,
+STORE, 140613689257984, 140613706039295,
+SNULL, 140613706043391, 140613714431999,
+STORE, 140613706039296, 140613706043391,
+STORE, 140613706043392, 140613714431999,
+SNULL, 140613118849024, 140613127237631,
+STORE, 140613127237632, 140613135630335,
+STORE, 140613118849024, 140613127237631,
+SNULL, 140613127241727, 140613135630335,
+STORE, 140613127237632, 140613127241727,
+STORE, 140613127241728, 140613135630335,
+SNULL, 140613571825664, 140613580214271,
+STORE, 140613580214272, 140613605392383,
+STORE, 140613571825664, 140613580214271,
+SNULL, 140613580218367, 140613605392383,
+STORE, 140613580214272, 140613580218367,
+STORE, 140613580218368, 140613605392383,
+SNULL, 140613689257984, 140613697646591,
+STORE, 140613697646592, 140613706039295,
+STORE, 140613689257984, 140613697646591,
+SNULL, 140613697650687, 140613706039295,
+STORE, 140613697646592, 140613697650687,
+STORE, 140613697650688, 140613706039295,
+SNULL, 140613680865279, 140613689253887,
+STORE, 140613680861184, 140613680865279,
+STORE, 140613680865280, 140613689253887,
+STORE, 140613563428864, 140613571821567,
+SNULL, 140613563432959, 140613571821567,
+STORE, 140613563428864, 140613563432959,
+STORE, 140613563432960, 140613571821567,
+SNULL, 140613580218368, 140613588606975,
+STORE, 140613588606976, 140613605392383,
+STORE, 140613580218368, 140613588606975,
+SNULL, 140613588611071, 140613605392383,
+STORE, 140613588606976, 140613588611071,
+STORE, 140613588611072, 140613605392383,
+SNULL, 140613513109504, 140613521498111,
+STORE, 140613521498112, 140613529890815,
+STORE, 140613513109504, 140613521498111,
+SNULL, 140613521502207, 140613529890815,
+STORE, 140613521498112, 140613521502207,
+STORE, 140613521502208, 140613529890815,
+SNULL, 140613588611072, 140613596999679,
+STORE, 140613596999680, 140613605392383,
+STORE, 140613588611072, 140613596999679,
+SNULL, 140613597003775, 140613605392383,
+STORE, 140613596999680, 140613597003775,
+STORE, 140613597003776, 140613605392383,
+STORE, 140613555036160, 140613563428863,
+SNULL, 140613555040255, 140613563428863,
+STORE, 140613555036160, 140613555040255,
+STORE, 140613555040256, 140613563428863,
+STORE, 140613546643456, 140613555036159,
+STORE, 140613538250752, 140613555036159,
+SNULL, 140613538250752, 140613546643455,
+STORE, 140613546643456, 140613555036159,
+STORE, 140613538250752, 140613546643455,
+SNULL, 140613546647551, 140613555036159,
+STORE, 140613546643456, 140613546647551,
+STORE, 140613546647552, 140613555036159,
+STORE, 140613504712704, 140613513105407,
+STORE, 140613496320000, 140613513105407,
+SNULL, 140613496324095, 140613513105407,
+STORE, 140613496320000, 140613496324095,
+STORE, 140613496324096, 140613513105407,
+STORE, 140613487927296, 140613496319999,
+SNULL, 140613487931391, 140613496319999,
+STORE, 140613487927296, 140613487931391,
+STORE, 140613487931392, 140613496319999,
+STORE, 140613479534592, 140613487927295,
+SNULL, 140612967845887, 140612976234495,
+STORE, 140612967841792, 140612967845887,
+STORE, 140612967845888, 140612976234495,
+STORE, 140613387280384, 140613395673087,
+STORE, 140613378887680, 140613395673087,
+SNULL, 140613378887680, 140613387280383,
+STORE, 140613387280384, 140613395673087,
+STORE, 140613378887680, 140613387280383,
+SNULL, 140613387284479, 140613395673087,
+STORE, 140613387280384, 140613387284479,
+STORE, 140613387284480, 140613395673087,
+STORE, 140613370494976, 140613387280383,
+STORE, 140613362102272, 140613387280383,
+SNULL, 140613479538687, 140613487927295,
+STORE, 140613479534592, 140613479538687,
+STORE, 140613479538688, 140613487927295,
+STORE, 140613353709568, 140613387280383,
+STORE, 140613345316864, 140613387280383,
+STORE, 140613244669952, 140613253062655,
+SNULL, 140613345320959, 140613387280383,
+STORE, 140613345316864, 140613345320959,
+STORE, 140613345320960, 140613387280383,
+SNULL, 140613538254847, 140613546643455,
+STORE, 140613538250752, 140613538254847,
+STORE, 140613538254848, 140613546643455,
+STORE, 140613236277248, 140613253062655,
+STORE, 140613227884544, 140613253062655,
+STORE, 140613219491840, 140613253062655,
+STORE, 140613211099136, 140613253062655,
+SNULL, 140613211103231, 140613253062655,
+STORE, 140613211099136, 140613211103231,
+STORE, 140613211103232, 140613253062655,
+STORE, 140613102059520, 140613110452223,
+STORE, 140613093666816, 140613110452223,
+SNULL, 140613093670911, 140613110452223,
+STORE, 140613093666816, 140613093670911,
+STORE, 140613093670912, 140613110452223,
+STORE, 140613085274112, 140613093666815,
+SNULL, 140613496324096, 140613504712703,
+STORE, 140613504712704, 140613513105407,
+STORE, 140613496324096, 140613504712703,
+SNULL, 140613504716799, 140613513105407,
+STORE, 140613504712704, 140613504716799,
+STORE, 140613504716800, 140613513105407,
+SNULL, 140613345320960, 140613378887679,
+STORE, 140613378887680, 140613387280383,
+STORE, 140613345320960, 140613378887679,
+SNULL, 140613378891775, 140613387280383,
+STORE, 140613378887680, 140613378891775,
+STORE, 140613378891776, 140613387280383,
+SNULL, 140613345320960, 140613362102271,
+STORE, 140613362102272, 140613378887679,
+STORE, 140613345320960, 140613362102271,
+SNULL, 140613362106367, 140613378887679,
+STORE, 140613362102272, 140613362106367,
+STORE, 140613362106368, 140613378887679,
+SNULL, 140613362106368, 140613370494975,
+STORE, 140613370494976, 140613378887679,
+STORE, 140613362106368, 140613370494975,
+SNULL, 140613370499071, 140613378887679,
+STORE, 140613370494976, 140613370499071,
+STORE, 140613370499072, 140613378887679,
+STORE, 140613076881408, 140613093666815,
+STORE, 140612993019904, 140613001412607,
+SNULL, 140613076885503, 140613093666815,
+STORE, 140613076881408, 140613076885503,
+STORE, 140613076885504, 140613093666815,
+SNULL, 140613093670912, 140613102059519,
+STORE, 140613102059520, 140613110452223,
+STORE, 140613093670912, 140613102059519,
+SNULL, 140613102063615, 140613110452223,
+STORE, 140613102059520, 140613102063615,
+STORE, 140613102063616, 140613110452223,
+SNULL, 140613076885504, 140613085274111,
+STORE, 140613085274112, 140613093666815,
+STORE, 140613076885504, 140613085274111,
+SNULL, 140613085278207, 140613093666815,
+STORE, 140613085274112, 140613085278207,
+STORE, 140613085278208, 140613093666815,
+STORE, 140612984627200, 140613001412607,
+STORE, 140612967845888, 140612984627199,
+SNULL, 140613211103232, 140613219491839,
+STORE, 140613219491840, 140613253062655,
+STORE, 140613211103232, 140613219491839,
+SNULL, 140613219495935, 140613253062655,
+STORE, 140613219491840, 140613219495935,
+STORE, 140613219495936, 140613253062655,
+STORE, 140612959449088, 140612967841791,
+STORE, 140612951056384, 140612967841791,
+SNULL, 140612951060479, 140612967841791,
+STORE, 140612951056384, 140612951060479,
+STORE, 140612951060480, 140612967841791,
+SNULL, 140613345320960, 140613353709567,
+STORE, 140613353709568, 140613362102271,
+STORE, 140613345320960, 140613353709567,
+SNULL, 140613353713663, 140613362102271,
+STORE, 140613353709568, 140613353713663,
+STORE, 140613353713664, 140613362102271,
+SNULL, 140613219495936, 140613244669951,
+STORE, 140613244669952, 140613253062655,
+STORE, 140613219495936, 140613244669951,
+SNULL, 140613244674047, 140613253062655,
+STORE, 140613244669952, 140613244674047,
+STORE, 140613244674048, 140613253062655,
+STORE, 140612942663680, 140612951056383,
+SNULL, 140613219495936, 140613236277247,
+STORE, 140613236277248, 140613244669951,
+STORE, 140613219495936, 140613236277247,
+SNULL, 140613236281343, 140613244669951,
+STORE, 140613236277248, 140613236281343,
+STORE, 140613236281344, 140613244669951,
+SNULL, 140613219495936, 140613227884543,
+STORE, 140613227884544, 140613236277247,
+STORE, 140613219495936, 140613227884543,
+SNULL, 140613227888639, 140613236277247,
+STORE, 140613227884544, 140613227888639,
+STORE, 140613227888640, 140613236277247,
+SNULL, 140612984627200, 140612993019903,
+STORE, 140612993019904, 140613001412607,
+STORE, 140612984627200, 140612993019903,
+SNULL, 140612993023999, 140613001412607,
+STORE, 140612993019904, 140612993023999,
+STORE, 140612993024000, 140613001412607,
+STORE, 140612858802176, 140612867194879,
+STORE, 140612850409472, 140612867194879,
+SNULL, 140612951060480, 140612959449087,
+STORE, 140612959449088, 140612967841791,
+STORE, 140612951060480, 140612959449087,
+SNULL, 140612959453183, 140612967841791,
+STORE, 140612959449088, 140612959453183,
+STORE, 140612959453184, 140612967841791,
+SNULL, 140612967845888, 140612976234495,
+STORE, 140612976234496, 140612984627199,
+STORE, 140612967845888, 140612976234495,
+SNULL, 140612976238591, 140612984627199,
+STORE, 140612976234496, 140612976238591,
+STORE, 140612976238592, 140612984627199,
+STORE, 140612842016768, 140612867194879,
+SNULL, 140612842020863, 140612867194879,
+STORE, 140612842016768, 140612842020863,
+STORE, 140612842020864, 140612867194879,
+SNULL, 140612984631295, 140612993019903,
+STORE, 140612984627200, 140612984631295,
+STORE, 140612984631296, 140612993019903,
+STORE, 140612825235456, 140612842016767,
+STORE, 140612808445952, 140612816838655,
+SNULL, 140612942667775, 140612951056383,
+STORE, 140612942663680, 140612942667775,
+STORE, 140612942667776, 140612951056383,
+STORE, 140612724584448, 140612732977151,
+SNULL, 140612724588543, 140612732977151,
+STORE, 140612724584448, 140612724588543,
+STORE, 140612724588544, 140612732977151,
+STORE, 140612716191744, 140612724584447,
+SNULL, 140612842020864, 140612850409471,
+STORE, 140612850409472, 140612867194879,
+STORE, 140612842020864, 140612850409471,
+SNULL, 140612850413567, 140612867194879,
+STORE, 140612850409472, 140612850413567,
+STORE, 140612850413568, 140612867194879,
+SNULL, 140612850413568, 140612858802175,
+STORE, 140612858802176, 140612867194879,
+STORE, 140612850413568, 140612858802175,
+SNULL, 140612858806271, 140612867194879,
+STORE, 140612858802176, 140612858806271,
+STORE, 140612858806272, 140612867194879,
+STORE, 140612707799040, 140612724584447,
+SNULL, 140612707803135, 140612724584447,
+STORE, 140612707799040, 140612707803135,
+STORE, 140612707803136, 140612724584447,
+SNULL, 140612707803136, 140612716191743,
+STORE, 140612716191744, 140612724584447,
+STORE, 140612707803136, 140612716191743,
+SNULL, 140612716195839, 140612724584447,
+STORE, 140612716191744, 140612716195839,
+STORE, 140612716195840, 140612724584447,
+SNULL, 140612808450047, 140612816838655,
+STORE, 140612808445952, 140612808450047,
+STORE, 140612808450048, 140612816838655,
+SNULL, 140612825235456, 140612833624063,
+STORE, 140612833624064, 140612842016767,
+STORE, 140612825235456, 140612833624063,
+SNULL, 140612833628159, 140612842016767,
+STORE, 140612833624064, 140612833628159,
+STORE, 140612833628160, 140612842016767,
+STORE, 140612699406336, 140612707799039,
+SNULL, 140612699410431, 140612707799039,
+STORE, 140612699406336, 140612699410431,
+STORE, 140612699410432, 140612707799039,
+STORE, 140614384926720, 140614384955391,
+STORE, 140614349332480, 140614351523839,
+SNULL, 140614349332480, 140614349422591,
+STORE, 140614349422592, 140614351523839,
+STORE, 140614349332480, 140614349422591,
+SNULL, 140614351515647, 140614351523839,
+STORE, 140614349422592, 140614351515647,
+STORE, 140614351515648, 140614351523839,
+ERASE, 140614351515648, 140614351523839,
+STORE, 140614351515648, 140614351523839,
+SNULL, 140614351519743, 140614351523839,
+STORE, 140614351515648, 140614351519743,
+STORE, 140614351519744, 140614351523839,
+ERASE, 140614384926720, 140614384955391,
+ERASE, 140613949296640, 140613949300735,
+ERASE, 140613949300736, 140613957689343,
+ERASE, 140613689253888, 140613689257983,
+ERASE, 140613689257984, 140613697646591,
+ERASE, 140613563428864, 140613563432959,
+ERASE, 140613563432960, 140613571821567,
+ERASE, 140613211099136, 140613211103231,
+ERASE, 140613211103232, 140613219491839,
+ERASE, 140614133870592, 140614133874687,
+ERASE, 140614133874688, 140614142263295,
+ERASE, 140612967841792, 140612967845887,
+ERASE, 140612967845888, 140612976234495,
+ERASE, 140613076881408, 140613076885503,
+ERASE, 140613076885504, 140613085274111,
+ERASE, 140612850409472, 140612850413567,
+ERASE, 140612850413568, 140612858802175,
+ERASE, 140613110452224, 140613110456319,
+ERASE, 140613110456320, 140613118844927,
+ERASE, 140613706039296, 140613706043391,
+ERASE, 140613706043392, 140613714431999,
+ERASE, 140613521498112, 140613521502207,
+ERASE, 140613521502208, 140613529890815,
+ERASE, 140613362102272, 140613362106367,
+ERASE, 140613362106368, 140613370494975,
+ERASE, 140613253062656, 140613253066751,
+ERASE, 140613253066752, 140613261455359,
+ERASE, 140612816838656, 140612816842751,
+ERASE, 140612816842752, 140612825231359,
+ERASE, 140613261455360, 140613261459455,
+ERASE, 140613261459456, 140613269848063,
+ERASE, 140613118844928, 140613118849023,
+ERASE, 140613118849024, 140613127237631,
+ERASE, 140613714432000, 140613714436095,
+ERASE, 140613714436096, 140613722824703,
+ERASE, 140613496320000, 140613496324095,
+ERASE, 140613496324096, 140613504712703,
+ERASE, 140613513105408, 140613513109503,
+ERASE, 140613513109504, 140613521498111,
+ERASE, 140613697646592, 140613697650687,
+ERASE, 140613697650688, 140613706039295,
+ERASE, 140613093666816, 140613093670911,
+ERASE, 140613093670912, 140613102059519,
+ERASE, 140612993019904, 140612993023999,
+ERASE, 140612993024000, 140613001412607,
+ERASE, 140613127237632, 140613127241727,
+ERASE, 140613127241728, 140613135630335,
+ERASE, 140613957689344, 140613957693439,
+ERASE, 140613957693440, 140613966082047,
+ERASE, 140613571821568, 140613571825663,
+ERASE, 140613571825664, 140613580214271,
+ERASE, 140613479534592, 140613479538687,
+ERASE, 140613479538688, 140613487927295,
+ERASE, 140612984627200, 140612984631295,
+ERASE, 140612984631296, 140612993019903,
+ERASE, 140613588606976, 140613588611071,
+ERASE, 140613588611072, 140613596999679,
+ERASE, 140613680861184, 140613680865279,
+ERASE, 140613680865280, 140613689253887,
+ERASE, 140613345316864, 140613345320959,
+ERASE, 140613345320960, 140613353709567,
+ERASE, 140613596999680, 140613597003775,
+ERASE, 140613597003776, 140613605392383,
+ERASE, 140613966082048, 140613966086143,
+ERASE, 140613966086144, 140613974474751,
+ERASE, 140613731217408, 140613731221503,
+ERASE, 140613731221504, 140613739610111,
+ERASE, 140613395673088, 140613395677183,
+ERASE, 140613395677184, 140613404065791,
+ERASE, 140612825231360, 140612825235455,
+ERASE, 140612825235456, 140612833624063,
+ERASE, 140612674228224, 140612674232319,
+ERASE, 140612674232320, 140612682620927,
+ERASE, 140613722824704, 140613722828799,
+ERASE, 140613722828800, 140613731217407,
+ERASE, 140613487927296, 140613487931391,
+ERASE, 140613487931392, 140613496319999,
+ERASE, 140613102059520, 140613102063615,
+ERASE, 140613102063616, 140613110452223,
+ERASE, 140614242910208, 140614242914303,
+ERASE, 140614242914304, 140614251302911,
+ERASE, 140612808445952, 140612808450047,
+ERASE, 140612808450048, 140612816838655,
+ERASE, 140613236277248, 140613236281343,
+ERASE, 140613236281344, 140613244669951,
+ERASE, 140613580214272, 140613580218367,
+ERASE, 140613580218368, 140613588606975,
+ERASE, 140613370494976, 140613370499071,
+ERASE, 140613370499072, 140613378887679,
+ERASE, 140613244669952, 140613244674047,
+ERASE, 140613244674048, 140613253062655,
+ERASE, 140612724584448, 140612724588543,
+ERASE, 140612724588544, 140612732977151,
+ERASE, 140612707799040, 140612707803135,
+ERASE, 140612707803136, 140612716191743,
+ERASE, 140613504712704, 140613504716799,
+ERASE, 140613504716800, 140613513105407,
+ };
+
+ unsigned long set39[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140736271417344, 140737488351231,
+SNULL, 140736271421439, 140737488351231,
+STORE, 140736271417344, 140736271421439,
+STORE, 140736271286272, 140736271421439,
+STORE, 94412930822144, 94412933074943,
+SNULL, 94412930953215, 94412933074943,
+STORE, 94412930822144, 94412930953215,
+STORE, 94412930953216, 94412933074943,
+ERASE, 94412930953216, 94412933074943,
+STORE, 94412933046272, 94412933054463,
+STORE, 94412933054464, 94412933074943,
+STORE, 140326136901632, 140326139154431,
+SNULL, 140326137044991, 140326139154431,
+STORE, 140326136901632, 140326137044991,
+STORE, 140326137044992, 140326139154431,
+ERASE, 140326137044992, 140326139154431,
+STORE, 140326139142144, 140326139150335,
+STORE, 140326139150336, 140326139154431,
+STORE, 140736271585280, 140736271589375,
+STORE, 140736271572992, 140736271585279,
+STORE, 140326139113472, 140326139142143,
+STORE, 140326139105280, 140326139113471,
+STORE, 140326134685696, 140326136901631,
+SNULL, 140326134685696, 140326134783999,
+STORE, 140326134784000, 140326136901631,
+STORE, 140326134685696, 140326134783999,
+SNULL, 140326136877055, 140326136901631,
+STORE, 140326134784000, 140326136877055,
+STORE, 140326136877056, 140326136901631,
+SNULL, 140326136877056, 140326136885247,
+STORE, 140326136885248, 140326136901631,
+STORE, 140326136877056, 140326136885247,
+ERASE, 140326136877056, 140326136885247,
+STORE, 140326136877056, 140326136885247,
+ERASE, 140326136885248, 140326136901631,
+STORE, 140326136885248, 140326136901631,
+STORE, 140326130888704, 140326134685695,
+SNULL, 140326130888704, 140326132547583,
+STORE, 140326132547584, 140326134685695,
+STORE, 140326130888704, 140326132547583,
+SNULL, 140326134644735, 140326134685695,
+STORE, 140326132547584, 140326134644735,
+STORE, 140326134644736, 140326134685695,
+SNULL, 140326134644736, 140326134669311,
+STORE, 140326134669312, 140326134685695,
+STORE, 140326134644736, 140326134669311,
+ERASE, 140326134644736, 140326134669311,
+STORE, 140326134644736, 140326134669311,
+ERASE, 140326134669312, 140326134685695,
+STORE, 140326134669312, 140326134685695,
+STORE, 140326139097088, 140326139113471,
+SNULL, 140326134661119, 140326134669311,
+STORE, 140326134644736, 140326134661119,
+STORE, 140326134661120, 140326134669311,
+SNULL, 140326136881151, 140326136885247,
+STORE, 140326136877056, 140326136881151,
+STORE, 140326136881152, 140326136885247,
+SNULL, 94412933050367, 94412933054463,
+STORE, 94412933046272, 94412933050367,
+STORE, 94412933050368, 94412933054463,
+SNULL, 140326139146239, 140326139150335,
+STORE, 140326139142144, 140326139146239,
+STORE, 140326139146240, 140326139150335,
+ERASE, 140326139113472, 140326139142143,
+STORE, 94412939493376, 94412939628543,
+STORE, 140326122496000, 140326130888703,
+SNULL, 140326122500095, 140326130888703,
+STORE, 140326122496000, 140326122500095,
+STORE, 140326122500096, 140326130888703,
+STORE, 140326114103296, 140326122495999,
+STORE, 140325979885568, 140326114103295,
+SNULL, 140325979885568, 140326043910143,
+STORE, 140326043910144, 140326114103295,
+STORE, 140325979885568, 140326043910143,
+ERASE, 140325979885568, 140326043910143,
+SNULL, 140326111019007, 140326114103295,
+STORE, 140326043910144, 140326111019007,
+STORE, 140326111019008, 140326114103295,
+ERASE, 140326111019008, 140326114103295,
+SNULL, 140326044045311, 140326111019007,
+STORE, 140326043910144, 140326044045311,
+STORE, 140326044045312, 140326111019007,
+SNULL, 140326114107391, 140326122495999,
+STORE, 140326114103296, 140326114107391,
+STORE, 140326114107392, 140326122495999,
+STORE, 140326035517440, 140326043910143,
+SNULL, 140326035521535, 140326043910143,
+STORE, 140326035517440, 140326035521535,
+STORE, 140326035521536, 140326043910143,
+STORE, 140326027124736, 140326035517439,
+SNULL, 140326027128831, 140326035517439,
+STORE, 140326027124736, 140326027128831,
+STORE, 140326027128832, 140326035517439,
+STORE, 140326018732032, 140326027124735,
+SNULL, 140326018736127, 140326027124735,
+STORE, 140326018732032, 140326018736127,
+STORE, 140326018736128, 140326027124735,
+STORE, 140326010339328, 140326018732031,
+STORE, 140326001946624, 140326018732031,
+STORE, 140325993553920, 140326018732031,
+STORE, 140325859336192, 140325993553919,
+SNULL, 140325859336192, 140325909692415,
+STORE, 140325909692416, 140325993553919,
+STORE, 140325859336192, 140325909692415,
+ERASE, 140325859336192, 140325909692415,
+SNULL, 140325976801279, 140325993553919,
+STORE, 140325909692416, 140325976801279,
+STORE, 140325976801280, 140325993553919,
+ERASE, 140325976801280, 140325993553919,
+STORE, 140325985161216, 140326018732031,
+STORE, 140325775474688, 140325976801279,
+STORE, 140325708365824, 140325976801279,
+SNULL, 140325708500991, 140325976801279,
+STORE, 140325708365824, 140325708500991,
+STORE, 140325708500992, 140325976801279,
+SNULL, 140325708500992, 140325909692415,
+STORE, 140325909692416, 140325976801279,
+STORE, 140325708500992, 140325909692415,
+SNULL, 140325909827583, 140325976801279,
+STORE, 140325909692416, 140325909827583,
+STORE, 140325909827584, 140325976801279,
+SNULL, 140325842583551, 140325909692415,
+STORE, 140325708500992, 140325842583551,
+STORE, 140325842583552, 140325909692415,
+ERASE, 140325842583552, 140325909692415,
+SNULL, 140325708500992, 140325775474687,
+STORE, 140325775474688, 140325842583551,
+STORE, 140325708500992, 140325775474687,
+SNULL, 140325775609855, 140325842583551,
+STORE, 140325775474688, 140325775609855,
+STORE, 140325775609856, 140325842583551,
+STORE, 140325775609856, 140325909692415,
+SNULL, 140325775609856, 140325842583551,
+STORE, 140325842583552, 140325909692415,
+STORE, 140325775609856, 140325842583551,
+SNULL, 140325842718719, 140325909692415,
+STORE, 140325842583552, 140325842718719,
+STORE, 140325842718720, 140325909692415,
+SNULL, 140325985161216, 140325993553919,
+STORE, 140325993553920, 140326018732031,
+STORE, 140325985161216, 140325993553919,
+SNULL, 140325993558015, 140326018732031,
+STORE, 140325993553920, 140325993558015,
+STORE, 140325993558016, 140326018732031,
+SNULL, 140325985165311, 140325993553919,
+STORE, 140325985161216, 140325985165311,
+STORE, 140325985165312, 140325993553919,
+SNULL, 140325993558016, 140326001946623,
+STORE, 140326001946624, 140326018732031,
+STORE, 140325993558016, 140326001946623,
+SNULL, 140326001950719, 140326018732031,
+STORE, 140326001946624, 140326001950719,
+STORE, 140326001950720, 140326018732031,
+SNULL, 140326001950720, 140326010339327,
+STORE, 140326010339328, 140326018732031,
+STORE, 140326001950720, 140326010339327,
+SNULL, 140326010343423, 140326018732031,
+STORE, 140326010339328, 140326010343423,
+STORE, 140326010343424, 140326018732031,
+STORE, 140325699973120, 140325708365823,
+STORE, 140325691580416, 140325708365823,
+STORE, 140325683187712, 140325708365823,
+SNULL, 140325683191807, 140325708365823,
+STORE, 140325683187712, 140325683191807,
+STORE, 140325683191808, 140325708365823,
+SNULL, 140325683191808, 140325699973119,
+STORE, 140325699973120, 140325708365823,
+STORE, 140325683191808, 140325699973119,
+SNULL, 140325699977215, 140325708365823,
+STORE, 140325699973120, 140325699977215,
+STORE, 140325699977216, 140325708365823,
+STORE, 140325674795008, 140325683187711,
+STORE, 140325666402304, 140325683187711,
+STORE, 140325658009600, 140325683187711,
+SNULL, 140325658009600, 140325666402303,
+STORE, 140325666402304, 140325683187711,
+STORE, 140325658009600, 140325666402303,
+SNULL, 140325666406399, 140325683187711,
+STORE, 140325666402304, 140325666406399,
+STORE, 140325666406400, 140325683187711,
+SNULL, 140325683191808, 140325691580415,
+STORE, 140325691580416, 140325699973119,
+STORE, 140325683191808, 140325691580415,
+SNULL, 140325691584511, 140325699973119,
+STORE, 140325691580416, 140325691584511,
+STORE, 140325691584512, 140325699973119,
+SNULL, 140325666406400, 140325674795007,
+STORE, 140325674795008, 140325683187711,
+STORE, 140325666406400, 140325674795007,
+SNULL, 140325674799103, 140325683187711,
+STORE, 140325674795008, 140325674799103,
+STORE, 140325674799104, 140325683187711,
+STORE, 140325649616896, 140325666402303,
+SNULL, 140325649616896, 140325658009599,
+STORE, 140325658009600, 140325666402303,
+STORE, 140325649616896, 140325658009599,
+SNULL, 140325658013695, 140325666402303,
+STORE, 140325658009600, 140325658013695,
+STORE, 140325658013696, 140325666402303,
+SNULL, 140325649620991, 140325658009599,
+STORE, 140325649616896, 140325649620991,
+STORE, 140325649620992, 140325658009599,
+STORE, 140325641224192, 140325649616895,
+STORE, 140325632831488, 140325649616895,
+SNULL, 140325632835583, 140325649616895,
+STORE, 140325632831488, 140325632835583,
+STORE, 140325632835584, 140325649616895,
+STORE, 140325624438784, 140325632831487,
+SNULL, 140325624442879, 140325632831487,
+STORE, 140325624438784, 140325624442879,
+STORE, 140325624442880, 140325632831487,
+SNULL, 140325632835584, 140325641224191,
+STORE, 140325641224192, 140325649616895,
+STORE, 140325632835584, 140325641224191,
+SNULL, 140325641228287, 140325649616895,
+STORE, 140325641224192, 140325641228287,
+STORE, 140325641228288, 140325649616895,
+STORE, 140325616046080, 140325624438783,
+SNULL, 140325616050175, 140325624438783,
+STORE, 140325616046080, 140325616050175,
+STORE, 140325616050176, 140325624438783,
+STORE, 140325607653376, 140325616046079,
+SNULL, 140325607657471, 140325616046079,
+STORE, 140325607653376, 140325607657471,
+STORE, 140325607657472, 140325616046079,
+STORE, 140325599260672, 140325607653375,
+STORE, 140325590867968, 140325607653375,
+STORE, 140325456650240, 140325590867967,
+SNULL, 140325456650240, 140325507039231,
+STORE, 140325507039232, 140325590867967,
+STORE, 140325456650240, 140325507039231,
+ERASE, 140325456650240, 140325507039231,
+STORE, 140325498646528, 140325507039231,
+STORE, 140325364428800, 140325498646527,
+SNULL, 140325364428800, 140325372821503,
+STORE, 140325372821504, 140325498646527,
+STORE, 140325364428800, 140325372821503,
+ERASE, 140325364428800, 140325372821503,
+STORE, 140325364428800, 140325372821503,
+STORE, 140325356036096, 140325372821503,
+STORE, 140325221818368, 140325356036095,
+SNULL, 140325221818368, 140325238603775,
+STORE, 140325238603776, 140325356036095,
+STORE, 140325221818368, 140325238603775,
+ERASE, 140325221818368, 140325238603775,
+STORE, 140325230211072, 140325238603775,
+STORE, 140325221818368, 140325238603775,
+STORE, 140325087600640, 140325221818367,
+STORE, 140325079207936, 140325087600639,
+SNULL, 140325087600640, 140325104386047,
+STORE, 140325104386048, 140325221818367,
+STORE, 140325087600640, 140325104386047,
+ERASE, 140325087600640, 140325104386047,
+STORE, 140325095993344, 140325104386047,
+STORE, 140325079207936, 140325104386047,
+STORE, 140324944990208, 140325079207935,
+SNULL, 140324944990208, 140324970168319,
+STORE, 140324970168320, 140325079207935,
+STORE, 140324944990208, 140324970168319,
+ERASE, 140324944990208, 140324970168319,
+STORE, 140324961775616, 140324970168319,
+STORE, 140324953382912, 140324970168319,
+STORE, 140324819165184, 140324953382911,
+STORE, 140324684947456, 140324953382911,
+STORE, 140324676554752, 140324684947455,
+STORE, 140324668162048, 140324684947455,
+STORE, 140324533944320, 140324668162047,
+STORE, 140324525551616, 140324533944319,
+SNULL, 140324533944320, 140324567515135,
+STORE, 140324567515136, 140324668162047,
+STORE, 140324533944320, 140324567515135,
+ERASE, 140324533944320, 140324567515135,
+STORE, 140324559122432, 140324567515135,
+STORE, 140324391333888, 140324525551615,
+SNULL, 140325574148095, 140325590867967,
+STORE, 140325507039232, 140325574148095,
+STORE, 140325574148096, 140325590867967,
+ERASE, 140325574148096, 140325590867967,
+SNULL, 140325439930367, 140325498646527,
+STORE, 140325372821504, 140325439930367,
+STORE, 140325439930368, 140325498646527,
+ERASE, 140325439930368, 140325498646527,
+SNULL, 140325305712639, 140325356036095,
+STORE, 140325238603776, 140325305712639,
+STORE, 140325305712640, 140325356036095,
+ERASE, 140325305712640, 140325356036095,
+SNULL, 140325171494911, 140325221818367,
+STORE, 140325104386048, 140325171494911,
+STORE, 140325171494912, 140325221818367,
+ERASE, 140325171494912, 140325221818367,
+SNULL, 140325104521215, 140325171494911,
+STORE, 140325104386048, 140325104521215,
+STORE, 140325104521216, 140325171494911,
+STORE, 140324257116160, 140324525551615,
+SNULL, 140324257116160, 140324299079679,
+STORE, 140324299079680, 140324525551615,
+STORE, 140324257116160, 140324299079679,
+ERASE, 140324257116160, 140324299079679,
+SNULL, 140325037277183, 140325079207935,
+STORE, 140324970168320, 140325037277183,
+STORE, 140325037277184, 140325079207935,
+ERASE, 140325037277184, 140325079207935,
+SNULL, 140324819165183, 140324953382911,
+STORE, 140324684947456, 140324819165183,
+STORE, 140324819165184, 140324953382911,
+SNULL, 140324819165184, 140324835950591,
+STORE, 140324835950592, 140324953382911,
+STORE, 140324819165184, 140324835950591,
+ERASE, 140324819165184, 140324835950591,
+SNULL, 140324903059455, 140324953382911,
+STORE, 140324835950592, 140324903059455,
+STORE, 140324903059456, 140324953382911,
+ERASE, 140324903059456, 140324953382911,
+SNULL, 140324684947456, 140324701732863,
+STORE, 140324701732864, 140324819165183,
+STORE, 140324684947456, 140324701732863,
+ERASE, 140324684947456, 140324701732863,
+SNULL, 140324768841727, 140324819165183,
+STORE, 140324701732864, 140324768841727,
+STORE, 140324768841728, 140324819165183,
+ERASE, 140324768841728, 140324819165183,
+SNULL, 140324634623999, 140324668162047,
+STORE, 140324567515136, 140324634623999,
+STORE, 140324634624000, 140324668162047,
+ERASE, 140324634624000, 140324668162047,
+SNULL, 140324391333887, 140324525551615,
+STORE, 140324299079680, 140324391333887,
+STORE, 140324391333888, 140324525551615,
+SNULL, 140324391333888, 140324433297407,
+STORE, 140324433297408, 140324525551615,
+STORE, 140324391333888, 140324433297407,
+ERASE, 140324391333888, 140324433297407,
+SNULL, 140325507174399, 140325574148095,
+STORE, 140325507039232, 140325507174399,
+STORE, 140325507174400, 140325574148095,
+SNULL, 140325590867968, 140325599260671,
+STORE, 140325599260672, 140325607653375,
+STORE, 140325590867968, 140325599260671,
+SNULL, 140325599264767, 140325607653375,
+STORE, 140325599260672, 140325599264767,
+STORE, 140325599264768, 140325607653375,
+SNULL, 140325372956671, 140325439930367,
+STORE, 140325372821504, 140325372956671,
+STORE, 140325372956672, 140325439930367,
+SNULL, 140324668166143, 140324684947455,
+STORE, 140324668162048, 140324668166143,
+STORE, 140324668166144, 140324684947455,
+SNULL, 140324525555711, 140324533944319,
+STORE, 140324525551616, 140324525555711,
+STORE, 140324525555712, 140324533944319,
+SNULL, 140324953382912, 140324961775615,
+STORE, 140324961775616, 140324970168319,
+STORE, 140324953382912, 140324961775615,
+SNULL, 140324961779711, 140324970168319,
+STORE, 140324961775616, 140324961779711,
+STORE, 140324961779712, 140324970168319,
+SNULL, 140325079212031, 140325104386047,
+STORE, 140325079207936, 140325079212031,
+STORE, 140325079212032, 140325104386047,
+SNULL, 140325221818368, 140325230211071,
+STORE, 140325230211072, 140325238603775,
+STORE, 140325221818368, 140325230211071,
+SNULL, 140325230215167, 140325238603775,
+STORE, 140325230211072, 140325230215167,
+STORE, 140325230215168, 140325238603775,
+SNULL, 140325356036096, 140325364428799,
+STORE, 140325364428800, 140325372821503,
+STORE, 140325356036096, 140325364428799,
+SNULL, 140325364432895, 140325372821503,
+ };
+ unsigned long set40[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140734309167104, 140737488351231,
+SNULL, 140734309171199, 140737488351231,
+STORE, 140734309167104, 140734309171199,
+STORE, 140734309036032, 140734309171199,
+STORE, 94270500081664, 94270502334463,
+SNULL, 94270500212735, 94270502334463,
+STORE, 94270500081664, 94270500212735,
+STORE, 94270500212736, 94270502334463,
+ERASE, 94270500212736, 94270502334463,
+STORE, 94270502305792, 94270502313983,
+STORE, 94270502313984, 94270502334463,
+STORE, 140321935110144, 140321937362943,
+SNULL, 140321935253503, 140321937362943,
+STORE, 140321935110144, 140321935253503,
+STORE, 140321935253504, 140321937362943,
+ERASE, 140321935253504, 140321937362943,
+STORE, 140321937350656, 140321937358847,
+STORE, 140321937358848, 140321937362943,
+STORE, 140734309625856, 140734309629951,
+STORE, 140734309613568, 140734309625855,
+STORE, 140321937321984, 140321937350655,
+STORE, 140321937313792, 140321937321983,
+STORE, 140321932894208, 140321935110143,
+SNULL, 140321932894208, 140321932992511,
+STORE, 140321932992512, 140321935110143,
+STORE, 140321932894208, 140321932992511,
+SNULL, 140321935085567, 140321935110143,
+STORE, 140321932992512, 140321935085567,
+STORE, 140321935085568, 140321935110143,
+SNULL, 140321935085568, 140321935093759,
+STORE, 140321935093760, 140321935110143,
+STORE, 140321935085568, 140321935093759,
+ERASE, 140321935085568, 140321935093759,
+STORE, 140321935085568, 140321935093759,
+ERASE, 140321935093760, 140321935110143,
+STORE, 140321935093760, 140321935110143,
+STORE, 140321929097216, 140321932894207,
+SNULL, 140321929097216, 140321930756095,
+STORE, 140321930756096, 140321932894207,
+STORE, 140321929097216, 140321930756095,
+SNULL, 140321932853247, 140321932894207,
+STORE, 140321930756096, 140321932853247,
+STORE, 140321932853248, 140321932894207,
+SNULL, 140321932853248, 140321932877823,
+STORE, 140321932877824, 140321932894207,
+STORE, 140321932853248, 140321932877823,
+ERASE, 140321932853248, 140321932877823,
+STORE, 140321932853248, 140321932877823,
+ERASE, 140321932877824, 140321932894207,
+STORE, 140321932877824, 140321932894207,
+STORE, 140321937305600, 140321937321983,
+SNULL, 140321932869631, 140321932877823,
+STORE, 140321932853248, 140321932869631,
+STORE, 140321932869632, 140321932877823,
+SNULL, 140321935089663, 140321935093759,
+STORE, 140321935085568, 140321935089663,
+STORE, 140321935089664, 140321935093759,
+SNULL, 94270502309887, 94270502313983,
+STORE, 94270502305792, 94270502309887,
+STORE, 94270502309888, 94270502313983,
+SNULL, 140321937354751, 140321937358847,
+STORE, 140321937350656, 140321937354751,
+STORE, 140321937354752, 140321937358847,
+ERASE, 140321937321984, 140321937350655,
+STORE, 94270507364352, 94270507499519,
+STORE, 140321920704512, 140321929097215,
+SNULL, 140321920708607, 140321929097215,
+STORE, 140321920704512, 140321920708607,
+STORE, 140321920708608, 140321929097215,
+STORE, 140321912311808, 140321920704511,
+STORE, 140321778094080, 140321912311807,
+SNULL, 140321778094080, 140321816051711,
+STORE, 140321816051712, 140321912311807,
+STORE, 140321778094080, 140321816051711,
+ERASE, 140321778094080, 140321816051711,
+SNULL, 140321883160575, 140321912311807,
+STORE, 140321816051712, 140321883160575,
+STORE, 140321883160576, 140321912311807,
+ERASE, 140321883160576, 140321912311807,
+SNULL, 140321816186879, 140321883160575,
+STORE, 140321816051712, 140321816186879,
+STORE, 140321816186880, 140321883160575,
+SNULL, 140321912315903, 140321920704511,
+STORE, 140321912311808, 140321912315903,
+STORE, 140321912315904, 140321920704511,
+STORE, 140321903919104, 140321912311807,
+SNULL, 140321903923199, 140321912311807,
+STORE, 140321903919104, 140321903923199,
+STORE, 140321903923200, 140321912311807,
+STORE, 140321895526400, 140321903919103,
+SNULL, 140321895530495, 140321903919103,
+STORE, 140321895526400, 140321895530495,
+STORE, 140321895530496, 140321903919103,
+STORE, 140321887133696, 140321895526399,
+SNULL, 140321887137791, 140321895526399,
+STORE, 140321887133696, 140321887137791,
+STORE, 140321887137792, 140321895526399,
+STORE, 140321807659008, 140321816051711,
+STORE, 140321673441280, 140321807659007,
+SNULL, 140321673441280, 140321681833983,
+STORE, 140321681833984, 140321807659007,
+STORE, 140321673441280, 140321681833983,
+ERASE, 140321673441280, 140321681833983,
+SNULL, 140321748942847, 140321807659007,
+STORE, 140321681833984, 140321748942847,
+STORE, 140321748942848, 140321807659007,
+ERASE, 140321748942848, 140321807659007,
+STORE, 140321799266304, 140321816051711,
+STORE, 140321790873600, 140321816051711,
+STORE, 140321782480896, 140321816051711,
+STORE, 140321547616256, 140321748942847,
+SNULL, 140321614725119, 140321748942847,
+STORE, 140321547616256, 140321614725119,
+STORE, 140321614725120, 140321748942847,
+SNULL, 140321614725120, 140321681833983,
+STORE, 140321681833984, 140321748942847,
+STORE, 140321614725120, 140321681833983,
+ERASE, 140321614725120, 140321681833983,
+SNULL, 140321681969151, 140321748942847,
+STORE, 140321681833984, 140321681969151,
+STORE, 140321681969152, 140321748942847,
+STORE, 140321547616256, 140321681833983,
+SNULL, 140321547616256, 140321614725119,
+STORE, 140321614725120, 140321681833983,
+STORE, 140321547616256, 140321614725119,
+SNULL, 140321614860287, 140321681833983,
+STORE, 140321614725120, 140321614860287,
+STORE, 140321614860288, 140321681833983,
+SNULL, 140321547751423, 140321614725119,
+STORE, 140321547616256, 140321547751423,
+STORE, 140321547751424, 140321614725119,
+STORE, 140321480507392, 140321547616255,
+SNULL, 140321782480896, 140321799266303,
+STORE, 140321799266304, 140321816051711,
+STORE, 140321782480896, 140321799266303,
+SNULL, 140321799270399, 140321816051711,
+STORE, 140321799266304, 140321799270399,
+STORE, 140321799270400, 140321816051711,
+STORE, 140321774088192, 140321799266303,
+SNULL, 140321774088192, 140321790873599,
+STORE, 140321790873600, 140321799266303,
+STORE, 140321774088192, 140321790873599,
+SNULL, 140321790877695, 140321799266303,
+STORE, 140321790873600, 140321790877695,
+STORE, 140321790877696, 140321799266303,
+SNULL, 140321480642559, 140321547616255,
+STORE, 140321480507392, 140321480642559,
+STORE, 140321480642560, 140321547616255,
+SNULL, 140321774088192, 140321782480895,
+STORE, 140321782480896, 140321790873599,
+STORE, 140321774088192, 140321782480895,
+SNULL, 140321782484991, 140321790873599,
+STORE, 140321782480896, 140321782484991,
+STORE, 140321782484992, 140321790873599,
+SNULL, 140321799270400, 140321807659007,
+STORE, 140321807659008, 140321816051711,
+STORE, 140321799270400, 140321807659007,
+SNULL, 140321807663103, 140321816051711,
+STORE, 140321807659008, 140321807663103,
+STORE, 140321807663104, 140321816051711,
+STORE, 140321765695488, 140321782480895,
+STORE, 140321757302784, 140321782480895,
+SNULL, 140321757306879, 140321782480895,
+STORE, 140321757302784, 140321757306879,
+STORE, 140321757306880, 140321782480895,
+STORE, 140321472114688, 140321480507391,
+STORE, 140321463721984, 140321480507391,
+SNULL, 140321463726079, 140321480507391,
+STORE, 140321463721984, 140321463726079,
+STORE, 140321463726080, 140321480507391,
+SNULL, 140321757306880, 140321774088191,
+STORE, 140321774088192, 140321782480895,
+STORE, 140321757306880, 140321774088191,
+SNULL, 140321774092287, 140321782480895,
+STORE, 140321774088192, 140321774092287,
+STORE, 140321774092288, 140321782480895,
+SNULL, 140321463726080, 140321472114687,
+STORE, 140321472114688, 140321480507391,
+STORE, 140321463726080, 140321472114687,
+SNULL, 140321472118783, 140321480507391,
+STORE, 140321472114688, 140321472118783,
+STORE, 140321472118784, 140321480507391,
+SNULL, 140321757306880, 140321765695487,
+STORE, 140321765695488, 140321774088191,
+STORE, 140321757306880, 140321765695487,
+SNULL, 140321765699583, 140321774088191,
+STORE, 140321765695488, 140321765699583,
+STORE, 140321765699584, 140321774088191,
+STORE, 140321455329280, 140321463721983,
+SNULL, 140321455333375, 140321463721983,
+STORE, 140321455329280, 140321455333375,
+STORE, 140321455333376, 140321463721983,
+STORE, 140321446936576, 140321455329279,
+STORE, 140321438543872, 140321455329279,
+STORE, 140321430151168, 140321455329279,
+SNULL, 140321430155263, 140321455329279,
+STORE, 140321430151168, 140321430155263,
+STORE, 140321430155264, 140321455329279,
+SNULL, 140321430155264, 140321446936575,
+STORE, 140321446936576, 140321455329279,
+STORE, 140321430155264, 140321446936575,
+SNULL, 140321446940671, 140321455329279,
+STORE, 140321446936576, 140321446940671,
+STORE, 140321446940672, 140321455329279,
+SNULL, 140321430155264, 140321438543871,
+STORE, 140321438543872, 140321446936575,
+STORE, 140321430155264, 140321438543871,
+SNULL, 140321438547967, 140321446936575,
+STORE, 140321438543872, 140321438547967,
+STORE, 140321438547968, 140321446936575,
+STORE, 140321421758464, 140321430151167,
+SNULL, 140321421762559, 140321430151167,
+STORE, 140321421758464, 140321421762559,
+STORE, 140321421762560, 140321430151167,
+STORE, 140321413365760, 140321421758463,
+SNULL, 140321413369855, 140321421758463,
+STORE, 140321413365760, 140321413369855,
+STORE, 140321413369856, 140321421758463,
+STORE, 140321404973056, 140321413365759,
+SNULL, 140321404977151, 140321413365759,
+STORE, 140321404973056, 140321404977151,
+STORE, 140321404977152, 140321413365759,
+STORE, 140321396580352, 140321404973055,
+STORE, 140321388187648, 140321404973055,
+STORE, 140321253969920, 140321388187647,
+SNULL, 140321253969920, 140321279180799,
+STORE, 140321279180800, 140321388187647,
+STORE, 140321253969920, 140321279180799,
+ERASE, 140321253969920, 140321279180799,
+SNULL, 140321346289663, 140321388187647,
+STORE, 140321279180800, 140321346289663,
+STORE, 140321346289664, 140321388187647,
+ERASE, 140321346289664, 140321388187647,
+STORE, 140321144963072, 140321346289663,
+STORE, 140321379794944, 140321404973055,
+STORE, 140321371402240, 140321404973055,
+STORE, 140321010745344, 140321346289663,
+STORE, 140321363009536, 140321404973055,
+SNULL, 140321077854207, 140321346289663,
+STORE, 140321010745344, 140321077854207,
+STORE, 140321077854208, 140321346289663,
+SNULL, 140321077854208, 140321144963071,
+STORE, 140321144963072, 140321346289663,
+STORE, 140321077854208, 140321144963071,
+ERASE, 140321077854208, 140321144963071,
+STORE, 140321354616832, 140321404973055,
+STORE, 140321136570368, 140321144963071,
+STORE, 140320943636480, 140321077854207,
+STORE, 140320876527616, 140321077854207,
+STORE, 140321128177664, 140321144963071,
+SNULL, 140320876662783, 140321077854207,
+STORE, 140320876527616, 140320876662783,
+STORE, 140320876662784, 140321077854207,
+STORE, 140321119784960, 140321144963071,
+STORE, 140321111392256, 140321144963071,
+STORE, 140320742309888, 140320876527615,
+STORE, 140321102999552, 140321144963071,
+STORE, 140320608092160, 140320876527615,
+SNULL, 140320675201023, 140320876527615,
+STORE, 140320608092160, 140320675201023,
+STORE, 140320675201024, 140320876527615,
+SNULL, 140320675201024, 140320742309887,
+STORE, 140320742309888, 140320876527615,
+STORE, 140320675201024, 140320742309887,
+ERASE, 140320675201024, 140320742309887,
+STORE, 140321094606848, 140321144963071,
+STORE, 140321086214144, 140321144963071,
+STORE, 140320608092160, 140320876527615,
+SNULL, 140320608092160, 140320675201023,
+STORE, 140320675201024, 140320876527615,
+STORE, 140320608092160, 140320675201023,
+SNULL, 140320675336191, 140320876527615,
+STORE, 140320675201024, 140320675336191,
+STORE, 140320675336192, 140320876527615,
+STORE, 140320599699456, 140320608092159,
+STORE, 140320591306752, 140320608092159,
+STORE, 140320457089024, 140320591306751,
+STORE, 140320448696320, 140320457089023,
+STORE, 140320314478592, 140320448696319,
+SNULL, 140321144963072, 140321279180799,
+STORE, 140321279180800, 140321346289663,
+STORE, 140321144963072, 140321279180799,
+SNULL, 140321279315967, 140321346289663,
+STORE, 140321279180800, 140321279315967,
+STORE, 140321279315968, 140321346289663,
+SNULL, 140321086214144, 140321136570367,
+STORE, 140321136570368, 140321144963071,
+STORE, 140321086214144, 140321136570367,
+SNULL, 140321136574463, 140321144963071,
+STORE, 140321136570368, 140321136574463,
+STORE, 140321136574464, 140321144963071,
+SNULL, 140321212071935, 140321279180799,
+STORE, 140321144963072, 140321212071935,
+STORE, 140321212071936, 140321279180799,
+ERASE, 140321212071936, 140321279180799,
+SNULL, 140321145098239, 140321212071935,
+STORE, 140321144963072, 140321145098239,
+STORE, 140321145098240, 140321212071935,
+SNULL, 140320876662784, 140321010745343,
+STORE, 140321010745344, 140321077854207,
+STORE, 140320876662784, 140321010745343,
+SNULL, 140321010880511, 140321077854207,
+STORE, 140321010745344, 140321010880511,
+STORE, 140321010880512, 140321077854207,
+SNULL, 140321354616832, 140321379794943,
+STORE, 140321379794944, 140321404973055,
+STORE, 140321354616832, 140321379794943,
+SNULL, 140321379799039, 140321404973055,
+STORE, 140321379794944, 140321379799039,
+STORE, 140321379799040, 140321404973055,
+SNULL, 140320876662784, 140320943636479,
+STORE, 140320943636480, 140321010745343,
+STORE, 140320876662784, 140320943636479,
+SNULL, 140320943771647, 140321010745343,
+STORE, 140320943636480, 140320943771647,
+STORE, 140320943771648, 140321010745343,
+SNULL, 140320809418751, 140320876527615,
+STORE, 140320675336192, 140320809418751,
+STORE, 140320809418752, 140320876527615,
+ERASE, 140320809418752, 140320876527615,
+SNULL, 140320675336192, 140320742309887,
+STORE, 140320742309888, 140320809418751,
+STORE, 140320675336192, 140320742309887,
+SNULL, 140320742445055, 140320809418751,
+STORE, 140320742309888, 140320742445055,
+STORE, 140320742445056, 140320809418751,
+SNULL, 140320608227327, 140320675201023,
+STORE, 140320608092160, 140320608227327,
+STORE, 140320608227328, 140320675201023,
+SNULL, 140320457089024, 140320473874431,
+STORE, 140320473874432, 140320591306751,
+STORE, 140320457089024, 140320473874431,
+ERASE, 140320457089024, 140320473874431,
+SNULL, 140320540983295, 140320591306751,
+STORE, 140320473874432, 140320540983295,
+STORE, 140320540983296, 140320591306751,
+ERASE, 140320540983296, 140320591306751,
+SNULL, 140320314478592, 140320339656703,
+STORE, 140320339656704, 140320448696319,
+STORE, 140320314478592, 140320339656703,
+ERASE, 140320314478592, 140320339656703,
+SNULL, 140321086214144, 140321128177663,
+STORE, 140321128177664, 140321136570367,
+STORE, 140321086214144, 140321128177663,
+SNULL, 140321128181759, 140321136570367,
+STORE, 140321128177664, 140321128181759,
+STORE, 140321128181760, 140321136570367,
+SNULL, 140321354616832, 140321371402239,
+STORE, 140321371402240, 140321379794943,
+STORE, 140321354616832, 140321371402239,
+SNULL, 140321371406335, 140321379794943,
+STORE, 140321371402240, 140321371406335,
+STORE, 140321371406336, 140321379794943,
+SNULL, 140320591310847, 140320608092159,
+STORE, 140320591306752, 140320591310847,
+STORE, 140320591310848, 140320608092159,
+SNULL, 140321354616832, 140321363009535,
+STORE, 140321363009536, 140321371402239,
+STORE, 140321354616832, 140321363009535,
+SNULL, 140321363013631, 140321371402239,
+STORE, 140321363009536, 140321363013631,
+STORE, 140321363013632, 140321371402239,
+SNULL, 140321086214144, 140321119784959,
+STORE, 140321119784960, 140321128177663,
+STORE, 140321086214144, 140321119784959,
+SNULL, 140321119789055, 140321128177663,
+STORE, 140321119784960, 140321119789055,
+STORE, 140321119789056, 140321128177663,
+SNULL, 140321086218239, 140321119784959,
+STORE, 140321086214144, 140321086218239,
+STORE, 140321086218240, 140321119784959,
+SNULL, 140321086218240, 140321094606847,
+STORE, 140321094606848, 140321119784959,
+STORE, 140321086218240, 140321094606847,
+SNULL, 140321094610943, 140321119784959,
+STORE, 140321094606848, 140321094610943,
+STORE, 140321094610944, 140321119784959,
+SNULL, 140320474009599, 140320540983295,
+STORE, 140320473874432, 140320474009599,
+STORE, 140320474009600, 140320540983295,
+SNULL, 140320406765567, 140320448696319,
+STORE, 140320339656704, 140320406765567,
+STORE, 140320406765568, 140320448696319,
+ERASE, 140320406765568, 140320448696319,
+SNULL, 140320339791871, 140320406765567,
+STORE, 140320339656704, 140320339791871,
+STORE, 140320339791872, 140320406765567,
+STORE, 140321270788096, 140321279180799,
+STORE, 140321262395392, 140321279180799,
+STORE, 140321254002688, 140321279180799,
+SNULL, 140321254002688, 140321262395391,
+STORE, 140321262395392, 140321279180799,
+STORE, 140321254002688, 140321262395391,
+SNULL, 140321262399487, 140321279180799,
+STORE, 140321262395392, 140321262399487,
+STORE, 140321262399488, 140321279180799,
+STORE, 140321245609984, 140321262395391,
+STORE, 140321237217280, 140321262395391,
+SNULL, 140321237217280, 140321245609983,
+STORE, 140321245609984, 140321262395391,
+STORE, 140321237217280, 140321245609983,
+SNULL, 140321245614079, 140321262395391,
+STORE, 140321245609984, 140321245614079,
+STORE, 140321245614080, 140321262395391,
+SNULL, 140321379799040, 140321388187647,
+STORE, 140321388187648, 140321404973055,
+STORE, 140321379799040, 140321388187647,
+SNULL, 140321388191743, 140321404973055,
+STORE, 140321388187648, 140321388191743,
+STORE, 140321388191744, 140321404973055,
+SNULL, 140321354620927, 140321363009535,
+STORE, 140321354616832, 140321354620927,
+STORE, 140321354620928, 140321363009535,
+SNULL, 140321388191744, 140321396580351,
+STORE, 140321396580352, 140321404973055,
+STORE, 140321388191744, 140321396580351,
+SNULL, 140321396584447, 140321404973055,
+STORE, 140321396580352, 140321396584447,
+STORE, 140321396584448, 140321404973055,
+SNULL, 140321094610944, 140321111392255,
+STORE, 140321111392256, 140321119784959,
+STORE, 140321094610944, 140321111392255,
+SNULL, 140321111396351, 140321119784959,
+STORE, 140321111392256, 140321111396351,
+STORE, 140321111396352, 140321119784959,
+STORE, 140321228824576, 140321245609983,
+SNULL, 140321094610944, 140321102999551,
+STORE, 140321102999552, 140321111392255,
+STORE, 140321094610944, 140321102999551,
+SNULL, 140321103003647, 140321111392255,
+STORE, 140321102999552, 140321103003647,
+STORE, 140321103003648, 140321111392255,
+STORE, 140321220431872, 140321245609983,
+SNULL, 140321220435967, 140321245609983,
+STORE, 140321220431872, 140321220435967,
+STORE, 140321220435968, 140321245609983,
+STORE, 140320868134912, 140320876527615,
+SNULL, 140320868139007, 140320876527615,
+STORE, 140320868134912, 140320868139007,
+STORE, 140320868139008, 140320876527615,
+SNULL, 140320591310848, 140320599699455,
+STORE, 140320599699456, 140320608092159,
+STORE, 140320591310848, 140320599699455,
+SNULL, 140320599703551, 140320608092159,
+STORE, 140320599699456, 140320599703551,
+STORE, 140320599703552, 140320608092159,
+STORE, 140320859742208, 140320868134911,
+SNULL, 140321262399488, 140321270788095,
+STORE, 140321270788096, 140321279180799,
+STORE, 140321262399488, 140321270788095,
+SNULL, 140321270792191, 140321279180799,
+STORE, 140321270788096, 140321270792191,
+STORE, 140321270792192, 140321279180799,
+STORE, 140320851349504, 140320868134911,
+STORE, 140320842956800, 140320868134911,
+STORE, 140320834564096, 140320868134911,
+STORE, 140320826171392, 140320868134911,
+SNULL, 140320826171392, 140320834564095,
+STORE, 140320834564096, 140320868134911,
+STORE, 140320826171392, 140320834564095,
+SNULL, 140320834568191, 140320868134911,
+STORE, 140320834564096, 140320834568191,
+STORE, 140320834568192, 140320868134911,
+SNULL, 140321220435968, 140321228824575,
+STORE, 140321228824576, 140321245609983,
+STORE, 140321220435968, 140321228824575,
+SNULL, 140321228828671, 140321245609983,
+STORE, 140321228824576, 140321228828671,
+STORE, 140321228828672, 140321245609983,
+STORE, 140320817778688, 140320834564095,
+SNULL, 140320817782783, 140320834564095,
+STORE, 140320817778688, 140320817782783,
+STORE, 140320817782784, 140320834564095,
+STORE, 140320582914048, 140320591306751,
+SNULL, 140321228828672, 140321237217279,
+STORE, 140321237217280, 140321245609983,
+STORE, 140321228828672, 140321237217279,
+SNULL, 140321237221375, 140321245609983,
+STORE, 140321237217280, 140321237221375,
+STORE, 140321237221376, 140321245609983,
+SNULL, 140320448700415, 140320457089023,
+STORE, 140320448696320, 140320448700415,
+STORE, 140320448700416, 140320457089023,
+SNULL, 140321245614080, 140321254002687,
+STORE, 140321254002688, 140321262395391,
+STORE, 140321245614080, 140321254002687,
+SNULL, 140321254006783, 140321262395391,
+STORE, 140321254002688, 140321254006783,
+STORE, 140321254006784, 140321262395391,
+STORE, 140320574521344, 140320591306751,
+SNULL, 140320574525439, 140320591306751,
+STORE, 140320574521344, 140320574525439,
+STORE, 140320574525440, 140320591306751,
+STORE, 140320566128640, 140320574521343,
+SNULL, 140320566132735, 140320574521343,
+STORE, 140320566128640, 140320566132735,
+STORE, 140320566132736, 140320574521343,
+SNULL, 140320574525440, 140320582914047,
+STORE, 140320582914048, 140320591306751,
+STORE, 140320574525440, 140320582914047,
+SNULL, 140320582918143, 140320591306751,
+STORE, 140320582914048, 140320582918143,
+STORE, 140320582918144, 140320591306751,
+STORE, 140320557735936, 140320566128639,
+SNULL, 140320557740031, 140320566128639,
+STORE, 140320557735936, 140320557740031,
+STORE, 140320557740032, 140320566128639,
+STORE, 140320549343232, 140320557735935,
+STORE, 140320465481728, 140320473874431,
+STORE, 140320448700416, 140320473874431,
+SNULL, 140320834568192, 140320859742207,
+STORE, 140320859742208, 140320868134911,
+STORE, 140320834568192, 140320859742207,
+SNULL, 140320859746303, 140320868134911,
+STORE, 140320859742208, 140320859746303,
+STORE, 140320859746304, 140320868134911,
+STORE, 140320440303616, 140320448696319,
+STORE, 140320431910912, 140320448696319,
+SNULL, 140320834568192, 140320851349503,
+STORE, 140320851349504, 140320859742207,
+STORE, 140320834568192, 140320851349503,
+SNULL, 140320851353599, 140320859742207,
+STORE, 140320851349504, 140320851353599,
+STORE, 140320851353600, 140320859742207,
+SNULL, 140320817782784, 140320826171391,
+STORE, 140320826171392, 140320834564095,
+STORE, 140320817782784, 140320826171391,
+SNULL, 140320826175487, 140320834564095,
+STORE, 140320826171392, 140320826175487,
+STORE, 140320826175488, 140320834564095,
+SNULL, 140320834568192, 140320842956799,
+STORE, 140320842956800, 140320851349503,
+STORE, 140320834568192, 140320842956799,
+SNULL, 140320842960895, 140320851349503,
+STORE, 140320842956800, 140320842960895,
+STORE, 140320842960896, 140320851349503,
+STORE, 140320423518208, 140320448696319,
+SNULL, 140320423522303, 140320448696319,
+STORE, 140320423518208, 140320423522303,
+STORE, 140320423522304, 140320448696319,
+STORE, 140320415125504, 140320423518207,
+STORE, 140320331264000, 140320339656703,
+STORE, 140320322871296, 140320339656703,
+STORE, 140320314478592, 140320339656703,
+SNULL, 140320314482687, 140320339656703,
+STORE, 140320314478592, 140320314482687,
+STORE, 140320314482688, 140320339656703,
+STORE, 140320306085888, 140320314478591,
+SNULL, 140320306089983, 140320314478591,
+STORE, 140320306085888, 140320306089983,
+STORE, 140320306089984, 140320314478591,
+STORE, 140320297693184, 140320306085887,
+SNULL, 140320297697279, 140320306085887,
+STORE, 140320297693184, 140320297697279,
+STORE, 140320297697280, 140320306085887,
+STORE, 140320289300480, 140320297693183,
+STORE, 140320280907776, 140320297693183,
+SNULL, 140320280911871, 140320297693183,
+STORE, 140320280907776, 140320280911871,
+STORE, 140320280911872, 140320297693183,
+SNULL, 140320423522304, 140320431910911,
+STORE, 140320431910912, 140320448696319,
+STORE, 140320423522304, 140320431910911,
+SNULL, 140320431915007, 140320448696319,
+STORE, 140320431910912, 140320431915007,
+STORE, 140320431915008, 140320448696319,
+SNULL, 140320549347327, 140320557735935,
+STORE, 140320549343232, 140320549347327,
+STORE, 140320549347328, 140320557735935,
+STORE, 140320272515072, 140320280907775,
+SNULL, 140320448700416, 140320457089023,
+STORE, 140320457089024, 140320473874431,
+STORE, 140320448700416, 140320457089023,
+SNULL, 140320457093119, 140320473874431,
+STORE, 140320457089024, 140320457093119,
+STORE, 140320457093120, 140320473874431,
+STORE, 140320264122368, 140320280907775,
+SNULL, 140320457093120, 140320465481727,
+STORE, 140320465481728, 140320473874431,
+STORE, 140320457093120, 140320465481727,
+SNULL, 140320465485823, 140320473874431,
+STORE, 140320465481728, 140320465485823,
+STORE, 140320465485824, 140320473874431,
+SNULL, 140320431915008, 140320440303615,
+STORE, 140320440303616, 140320448696319,
+STORE, 140320431915008, 140320440303615,
+SNULL, 140320440307711, 140320448696319,
+STORE, 140320440303616, 140320440307711,
+STORE, 140320440307712, 140320448696319,
+STORE, 140320255729664, 140320280907775,
+STORE, 140320247336960, 140320280907775,
+SNULL, 140320247341055, 140320280907775,
+STORE, 140320247336960, 140320247341055,
+STORE, 140320247341056, 140320280907775,
+STORE, 140320238944256, 140320247336959,
+STORE, 140320230551552, 140320247336959,
+SNULL, 140320230551552, 140320238944255,
+STORE, 140320238944256, 140320247336959,
+STORE, 140320230551552, 140320238944255,
+SNULL, 140320238948351, 140320247336959,
+STORE, 140320238944256, 140320238948351,
+STORE, 140320238948352, 140320247336959,
+SNULL, 140320314482688, 140320331263999,
+STORE, 140320331264000, 140320339656703,
+STORE, 140320314482688, 140320331263999,
+SNULL, 140320331268095, 140320339656703,
+STORE, 140320331264000, 140320331268095,
+STORE, 140320331268096, 140320339656703,
+SNULL, 140320280911872, 140320289300479,
+STORE, 140320289300480, 140320297693183,
+STORE, 140320280911872, 140320289300479,
+SNULL, 140320289304575, 140320297693183,
+STORE, 140320289300480, 140320289304575,
+STORE, 140320289304576, 140320297693183,
+SNULL, 140320415129599, 140320423518207,
+STORE, 140320415125504, 140320415129599,
+STORE, 140320415129600, 140320423518207,
+STORE, 140320222158848, 140320238944255,
+STORE, 140320213766144, 140320238944255,
+STORE, 140320205373440, 140320238944255,
+SNULL, 140320205377535, 140320238944255,
+STORE, 140320205373440, 140320205377535,
+STORE, 140320205377536, 140320238944255,
+SNULL, 140320314482688, 140320322871295,
+STORE, 140320322871296, 140320331263999,
+STORE, 140320314482688, 140320322871295,
+SNULL, 140320322875391, 140320331263999,
+STORE, 140320322871296, 140320322875391,
+STORE, 140320322875392, 140320331263999,
+SNULL, 140320247341056, 140320272515071,
+STORE, 140320272515072, 140320280907775,
+STORE, 140320247341056, 140320272515071,
+SNULL, 140320272519167, 140320280907775,
+STORE, 140320272515072, 140320272519167,
+STORE, 140320272519168, 140320280907775,
+SNULL, 140320247341056, 140320264122367,
+STORE, 140320264122368, 140320272515071,
+STORE, 140320247341056, 140320264122367,
+SNULL, 140320264126463, 140320272515071,
+STORE, 140320264122368, 140320264126463,
+STORE, 140320264126464, 140320272515071,
+SNULL, 140320205377536, 140320230551551,
+STORE, 140320230551552, 140320238944255,
+STORE, 140320205377536, 140320230551551,
+SNULL, 140320230555647, 140320238944255,
+STORE, 140320230551552, 140320230555647,
+STORE, 140320230555648, 140320238944255,
+STORE, 140320196980736, 140320205373439,
+SNULL, 140320196984831, 140320205373439,
+STORE, 140320196980736, 140320196984831,
+STORE, 140320196984832, 140320205373439,
+STORE, 140320188588032, 140320196980735,
+SNULL, 140320247341056, 140320255729663,
+STORE, 140320255729664, 140320264122367,
+STORE, 140320247341056, 140320255729663,
+SNULL, 140320255733759, 140320264122367,
+STORE, 140320255729664, 140320255733759,
+STORE, 140320255733760, 140320264122367,
+STORE, 140320180195328, 140320196980735,
+SNULL, 140320180199423, 140320196980735,
+STORE, 140320180195328, 140320180199423,
+STORE, 140320180199424, 140320196980735,
+STORE, 140320171802624, 140320180195327,
+STORE, 140320163409920, 140320180195327,
+SNULL, 140320163414015, 140320180195327,
+STORE, 140320163409920, 140320163414015,
+STORE, 140320163414016, 140320180195327,
+SNULL, 140320205377536, 140320222158847,
+STORE, 140320222158848, 140320230551551,
+STORE, 140320205377536, 140320222158847,
+SNULL, 140320222162943, 140320230551551,
+STORE, 140320222158848, 140320222162943,
+STORE, 140320222162944, 140320230551551,
+SNULL, 140320205377536, 140320213766143,
+STORE, 140320213766144, 140320222158847,
+STORE, 140320205377536, 140320213766143,
+SNULL, 140320213770239, 140320222158847,
+STORE, 140320213766144, 140320213770239,
+STORE, 140320213770240, 140320222158847,
+STORE, 140320155017216, 140320163409919,
+SNULL, 140320180199424, 140320188588031,
+STORE, 140320188588032, 140320196980735,
+STORE, 140320180199424, 140320188588031,
+SNULL, 140320188592127, 140320196980735,
+STORE, 140320188588032, 140320188592127,
+STORE, 140320188592128, 140320196980735,
+SNULL, 140320155021311, 140320163409919,
+STORE, 140320155017216, 140320155021311,
+STORE, 140320155021312, 140320163409919,
+SNULL, 140320163414016, 140320171802623,
+STORE, 140320171802624, 140320180195327,
+STORE, 140320163414016, 140320171802623,
+SNULL, 140320171806719, 140320180195327,
+STORE, 140320171802624, 140320171806719,
+STORE, 140320171806720, 140320180195327,
+STORE, 140320146624512, 140320155017215,
+SNULL, 140320146628607, 140320155017215,
+STORE, 140320146624512, 140320146628607,
+STORE, 140320146628608, 140320155017215,
+STORE, 140321937321984, 140321937350655,
+STORE, 140321884942336, 140321887133695,
+SNULL, 140321884942336, 140321885032447,
+STORE, 140321885032448, 140321887133695,
+STORE, 140321884942336, 140321885032447,
+SNULL, 140321887125503, 140321887133695,
+STORE, 140321885032448, 140321887125503,
+STORE, 140321887125504, 140321887133695,
+ERASE, 140321887125504, 140321887133695,
+STORE, 140321887125504, 140321887133695,
+SNULL, 140321887129599, 140321887133695,
+STORE, 140321887125504, 140321887129599,
+STORE, 140321887129600, 140321887133695,
+ERASE, 140321937321984, 140321937350655,
+ERASE, 140321086214144, 140321086218239,
+ERASE, 140321086218240, 140321094606847,
+ERASE, 140321119784960, 140321119789055,
+ERASE, 140321119789056, 140321128177663,
+ERASE, 140321245609984, 140321245614079,
+ERASE, 140321245614080, 140321254002687,
+ERASE, 140320574521344, 140320574525439,
+ERASE, 140320574525440, 140320582914047,
+ERASE, 140320297693184, 140320297697279,
+ERASE, 140320297697280, 140320306085887,
+ERASE, 140321354616832, 140321354620927,
+ERASE, 140321354620928, 140321363009535,
+ERASE, 140320834564096, 140320834568191,
+ERASE, 140320834568192, 140320842956799,
+ERASE, 140320591306752, 140320591310847,
+ERASE, 140320591310848, 140320599699455,
+ERASE, 140321136570368, 140321136574463,
+ERASE, 140321136574464, 140321144963071,
+ERASE, 140321237217280, 140321237221375,
+ERASE, 140321237221376, 140321245609983,
+ERASE, 140321363009536, 140321363013631,
+ERASE, 140321363013632, 140321371402239,
+ERASE, 140320599699456, 140320599703551,
+ERASE, 140320599703552, 140320608092159,
+ERASE, 140321396580352, 140321396584447,
+ERASE, 140321396584448, 140321404973055,
+ERASE, 140320566128640, 140320566132735,
+ERASE, 140320566132736, 140320574521343,
+ERASE, 140321094606848, 140321094610943,
+ERASE, 140321094610944, 140321102999551,
+ERASE, 140320582914048, 140320582918143,
+ERASE, 140320582918144, 140320591306751,
+ERASE, 140320289300480, 140320289304575,
+ERASE, 140320289304576, 140320297693183,
+ERASE, 140320163409920, 140320163414015,
+ };
+ unsigned long set41[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140728157171712, 140737488351231,
+SNULL, 140728157175807, 140737488351231,
+STORE, 140728157171712, 140728157175807,
+STORE, 140728157040640, 140728157175807,
+STORE, 94376106364928, 94376108613631,
+SNULL, 94376106487807, 94376108613631,
+STORE, 94376106364928, 94376106487807,
+STORE, 94376106487808, 94376108613631,
+SNULL, 94376106487808, 94376108613631,
+STORE, 94376108584960, 94376108593151,
+STORE, 94376108593152, 94376108613631,
+STORE, 140113496432640, 140113498685439,
+SNULL, 140113496575999, 140113498685439,
+STORE, 140113496432640, 140113496575999,
+STORE, 140113496576000, 140113498685439,
+SNULL, 140113496576000, 140113498685439,
+STORE, 140113498673152, 140113498681343,
+STORE, 140113498681344, 140113498685439,
+STORE, 140728157609984, 140728157618175,
+STORE, 140728157593600, 140728157609983,
+STORE, 140113498636288, 140113498673151,
+STORE, 140113498628096, 140113498636287,
+STORE, 140113492635648, 140113496432639,
+SNULL, 140113492635648, 140113494294527,
+STORE, 140113494294528, 140113496432639,
+STORE, 140113492635648, 140113494294527,
+SNULL, 140113496391679, 140113496432639,
+STORE, 140113494294528, 140113496391679,
+STORE, 140113496391680, 140113496432639,
+SNULL, 140113496391680, 140113496416255,
+STORE, 140113496416256, 140113496432639,
+STORE, 140113496391680, 140113496416255,
+SNULL, 140113496391680, 140113496416255,
+STORE, 140113496391680, 140113496416255,
+SNULL, 140113496416256, 140113496432639,
+STORE, 140113496416256, 140113496432639,
+SNULL, 140113496408063, 140113496416255,
+STORE, 140113496391680, 140113496408063,
+STORE, 140113496408064, 140113496416255,
+SNULL, 94376108589055, 94376108593151,
+STORE, 94376108584960, 94376108589055,
+STORE, 94376108589056, 94376108593151,
+SNULL, 140113498677247, 140113498681343,
+STORE, 140113498673152, 140113498677247,
+STORE, 140113498677248, 140113498681343,
+SNULL, 140113498636288, 140113498673151,
+STORE, 94376135090176, 94376135094271,
+STORE, 94376135090176, 94376135098367,
+STORE, 94376139288576, 94376139292671,
+STORE, 94376143482880, 94376143486975,
+STORE, 94376147677184, 94376147681279,
+STORE, 94376151871488, 94376151875583,
+STORE, 94376156065792, 94376156069887,
+STORE, 94376160260096, 94376160264191,
+STORE, 94376164454400, 94376164458495,
+STORE, 94376168648704, 94376168652799,
+STORE, 94376172843008, 94376172847103,
+STORE, 94376177037312, 94376177041407,
+STORE, 94376181231616, 94376181235711,
+STORE, 94376185425920, 94376185430015,
+STORE, 94376189620224, 94376189624319,
+STORE, 94376193814528, 94376193818623,
+STORE, 94376198008832, 94376198012927,
+STORE, 94376202203136, 94376202207231,
+STORE, 94376206397440, 94376206401535,
+STORE, 94376210591744, 94376210595839,
+STORE, 94376214786048, 94376214790143,
+STORE, 94376218980352, 94376218984447,
+STORE, 94376223174656, 94376223178751,
+STORE, 94376227368960, 94376227373055,
+STORE, 94376231563264, 94376231567359,
+STORE, 94376235757568, 94376235761663,
+STORE, 94376239951872, 94376239955967,
+STORE, 94376244146176, 94376244150271,
+STORE, 94376248340480, 94376248344575,
+STORE, 94376252534784, 94376252538879,
+STORE, 94376256729088, 94376256733183,
+STORE, 94376260923392, 94376260927487,
+STORE, 94376265117696, 94376265121791,
+STORE, 94376269312000, 94376269316095,
+STORE, 94376273506304, 94376273510399,
+STORE, 94376277700608, 94376277704703,
+STORE, 94376281894912, 94376281899007,
+STORE, 94376286089216, 94376286093311,
+STORE, 94376290283520, 94376290287615,
+STORE, 94376294477824, 94376294481919,
+STORE, 94376298672128, 94376298676223,
+STORE, 94376302866432, 94376302870527,
+STORE, 94376307060736, 94376307064831,
+STORE, 94376311255040, 94376311259135,
+STORE, 94376315449344, 94376315453439,
+STORE, 94376319643648, 94376319647743,
+STORE, 94376323837952, 94376323842047,
+STORE, 94376328032256, 94376328036351,
+STORE, 94376332226560, 94376332230655,
+STORE, 94376336420864, 94376336424959,
+STORE, 94376340615168, 94376340619263,
+STORE, 94376344809472, 94376344813567,
+STORE, 94376349003776, 94376349007871,
+STORE, 94376353198080, 94376353202175,
+STORE, 94376357392384, 94376357396479,
+STORE, 94376361586688, 94376361590783,
+STORE, 94376365780992, 94376365785087,
+STORE, 94376369975296, 94376369979391,
+STORE, 94376374169600, 94376374173695,
+STORE, 94376378363904, 94376378367999,
+STORE, 94376382558208, 94376382562303,
+STORE, 94376386752512, 94376386756607,
+STORE, 94376390946816, 94376390950911,
+STORE, 94376395141120, 94376395145215,
+STORE, 94376399335424, 94376399339519,
+STORE, 94376403529728, 94376403533823,
+STORE, 94376407724032, 94376407728127,
+STORE, 94376411918336, 94376411922431,
+STORE, 94376416112640, 94376416116735,
+STORE, 94376420306944, 94376420311039,
+STORE, 94376424501248, 94376424505343,
+STORE, 94376428695552, 94376428699647,
+STORE, 94376432889856, 94376432893951,
+STORE, 94376437084160, 94376437088255,
+STORE, 94376441278464, 94376441282559,
+STORE, 94376445472768, 94376445476863,
+STORE, 94376449667072, 94376449671167,
+STORE, 94376453861376, 94376453865471,
+STORE, 94376458055680, 94376458059775,
+STORE, 94376462249984, 94376462254079,
+STORE, 94376466444288, 94376466448383,
+STORE, 94376470638592, 94376470642687,
+STORE, 94376474832896, 94376474836991,
+STORE, 94376479027200, 94376479031295,
+STORE, 94376483221504, 94376483225599,
+STORE, 94376487415808, 94376487419903,
+STORE, 94376491610112, 94376491614207,
+STORE, 94376495804416, 94376495808511,
+STORE, 94376499998720, 94376500002815,
+STORE, 94376504193024, 94376504197119,
+STORE, 94376508387328, 94376508391423,
+STORE, 94376512581632, 94376512585727,
+STORE, 94376516775936, 94376516780031,
+STORE, 94376520970240, 94376520974335,
+STORE, 94376525164544, 94376525168639,
+STORE, 94376529358848, 94376529362943,
+STORE, 94376533553152, 94376533557247,
+STORE, 94376537747456, 94376537751551,
+STORE, 94376541941760, 94376541945855,
+STORE, 94376546136064, 94376546140159,
+STORE, 94376550330368, 94376550334463,
+STORE, 94376554524672, 94376554528767,
+STORE, 94376558718976, 94376558723071,
+STORE, 94376562913280, 94376562917375,
+STORE, 94376567107584, 94376567111679,
+STORE, 94376571301888, 94376571305983,
+STORE, 94376575496192, 94376575500287,
+STORE, 94376579690496, 94376579694591,
+STORE, 94376583884800, 94376583888895,
+STORE, 94376588079104, 94376588083199,
+STORE, 94376592273408, 94376592277503,
+STORE, 94376596467712, 94376596471807,
+STORE, 94376600662016, 94376600666111,
+STORE, 94376604856320, 94376604860415,
+STORE, 94376609050624, 94376609054719,
+STORE, 94376613244928, 94376613249023,
+STORE, 94376617439232, 94376617443327,
+STORE, 94376621633536, 94376621637631,
+STORE, 94376625827840, 94376625831935,
+STORE, 94376630022144, 94376630026239,
+STORE, 94376634216448, 94376634220543,
+STORE, 94376638410752, 94376638414847,
+STORE, 94376642605056, 94376642609151,
+STORE, 94376646799360, 94376646803455,
+STORE, 94376650993664, 94376650997759,
+STORE, 94376655187968, 94376655192063,
+STORE, 94376659382272, 94376659386367,
+STORE, 94376663576576, 94376663580671,
+STORE, 94376667770880, 94376667774975,
+STORE, 94376671965184, 94376671969279,
+STORE, 94376676159488, 94376676163583,
+STORE, 94376680353792, 94376680357887,
+STORE, 94376684548096, 94376684552191,
+STORE, 94376688742400, 94376688746495,
+STORE, 94376692936704, 94376692940799,
+STORE, 94376697131008, 94376697135103,
+STORE, 94376701325312, 94376701329407,
+STORE, 94376705519616, 94376705523711,
+STORE, 94376709713920, 94376709718015,
+STORE, 94376713908224, 94376713912319,
+STORE, 94376718102528, 94376718106623,
+STORE, 94376722296832, 94376722300927,
+STORE, 94376726491136, 94376726495231,
+STORE, 94376730685440, 94376730689535,
+STORE, 94376734879744, 94376734883839,
+STORE, 94376739074048, 94376739078143,
+STORE, 94376743268352, 94376743272447,
+STORE, 94376747462656, 94376747466751,
+STORE, 94376751656960, 94376751661055,
+STORE, 94376755851264, 94376755855359,
+STORE, 94376760045568, 94376760049663,
+STORE, 94376764239872, 94376764243967,
+STORE, 94376768434176, 94376768438271,
+STORE, 94376772628480, 94376772632575,
+STORE, 94376776822784, 94376776826879,
+STORE, 94376781017088, 94376781021183,
+STORE, 94376785211392, 94376785215487,
+STORE, 94376789405696, 94376789409791,
+STORE, 94376793600000, 94376793604095,
+STORE, 94376797794304, 94376797798399,
+STORE, 94376801988608, 94376801992703,
+STORE, 94376806182912, 94376806187007,
+STORE, 94376810377216, 94376810381311,
+STORE, 94376814571520, 94376814575615,
+STORE, 94376818765824, 94376818769919,
+STORE, 94376822960128, 94376822964223,
+STORE, 94376827154432, 94376827158527,
+STORE, 94376831348736, 94376831352831,
+STORE, 94376835543040, 94376835547135,
+STORE, 94376839737344, 94376839741439,
+STORE, 94376843931648, 94376843935743,
+STORE, 94376848125952, 94376848130047,
+STORE, 94376852320256, 94376852324351,
+STORE, 94376856514560, 94376856518655,
+STORE, 94376860708864, 94376860712959,
+STORE, 94376864903168, 94376864907263,
+STORE, 94376869097472, 94376869101567,
+STORE, 94376873291776, 94376873295871,
+STORE, 94376877486080, 94376877490175,
+STORE, 94376881680384, 94376881684479,
+STORE, 94376885874688, 94376885878783,
+STORE, 94376890068992, 94376890073087,
+STORE, 94376894263296, 94376894267391,
+STORE, 94376898457600, 94376898461695,
+STORE, 94376902651904, 94376902655999,
+STORE, 94376906846208, 94376906850303,
+STORE, 94376911040512, 94376911044607,
+STORE, 94376915234816, 94376915238911,
+STORE, 94376919429120, 94376919433215,
+STORE, 94376923623424, 94376923627519,
+STORE, 94376927817728, 94376927821823,
+STORE, 94376932012032, 94376932016127,
+STORE, 94376936206336, 94376936210431,
+STORE, 94376940400640, 94376940404735,
+STORE, 94376944594944, 94376944599039,
+STORE, 94376948789248, 94376948793343,
+STORE, 94376952983552, 94376952987647,
+STORE, 94376957177856, 94376957181951,
+STORE, 94376961372160, 94376961376255,
+STORE, 94376965566464, 94376965570559,
+STORE, 94376969760768, 94376969764863,
+STORE, 94376973955072, 94376973959167,
+STORE, 94376978149376, 94376978153471,
+STORE, 94376982343680, 94376982347775,
+STORE, 94376986537984, 94376986542079,
+STORE, 94376990732288, 94376990736383,
+STORE, 94376994926592, 94376994930687,
+STORE, 94376999120896, 94376999124991,
+STORE, 94377003315200, 94377003319295,
+STORE, 94377007509504, 94377007513599,
+STORE, 94377011703808, 94377011707903,
+STORE, 94377015898112, 94377015902207,
+STORE, 94377020092416, 94377020096511,
+STORE, 94377024286720, 94377024290815,
+STORE, 94377028481024, 94377028485119,
+STORE, 94377032675328, 94377032679423,
+STORE, 94377036869632, 94377036873727,
+STORE, 94377041063936, 94377041068031,
+STORE, 94377045258240, 94377045262335,
+STORE, 94377049452544, 94377049456639,
+STORE, 94377053646848, 94377053650943,
+STORE, 94377057841152, 94377057845247,
+STORE, 94377062035456, 94377062039551,
+STORE, 94377066229760, 94377066233855,
+STORE, 94377070424064, 94377070428159,
+STORE, 94377074618368, 94377074622463,
+STORE, 94377078812672, 94377078816767,
+STORE, 94377083006976, 94377083011071,
+STORE, 94377087201280, 94377087205375,
+STORE, 94377091395584, 94377091399679,
+STORE, 94377095589888, 94377095593983,
+STORE, 94377099784192, 94377099788287,
+STORE, 94377103978496, 94377103982591,
+STORE, 94377108172800, 94377108176895,
+STORE, 94377112367104, 94377112371199,
+STORE, 94377116561408, 94377116565503,
+STORE, 94377120755712, 94377120759807,
+STORE, 94377124950016, 94377124954111,
+STORE, 94377129144320, 94377129148415,
+STORE, 94377133338624, 94377133342719,
+STORE, 94377137532928, 94377137537023,
+STORE, 94377141727232, 94377141731327,
+STORE, 94377145921536, 94377145925631,
+STORE, 94377150115840, 94377150119935,
+STORE, 94377154310144, 94377154314239,
+STORE, 94377158504448, 94377158508543,
+STORE, 94377162698752, 94377162702847,
+STORE, 94377166893056, 94377166897151,
+STORE, 94377171087360, 94377171091455,
+STORE, 94377175281664, 94377175285759,
+STORE, 94377179475968, 94377179480063,
+STORE, 94377183670272, 94377183674367,
+STORE, 94377187864576, 94377187868671,
+STORE, 94377192058880, 94377192062975,
+STORE, 94377196253184, 94377196257279,
+STORE, 94377200447488, 94377200451583,
+STORE, 94377204641792, 94377204645887,
+SNULL, 94376135094271, 94376135098367,
+STORE, 94376135090176, 94376135094271,
+STORE, 94376135094272, 94376135098367,
+SNULL, 94376135094272, 94377208836095,
+ };
+ unsigned long set42[] = {
+STORE, 314572800, 1388314623,
+STORE, 1462157312, 1462169599,
+STORE, 1462169600, 1462185983,
+STORE, 1462185984, 1462190079,
+STORE, 1462190080, 1462194175,
+STORE, 1462194176, 1462198271,
+STORE, 1879986176, 1881800703,
+STORE, 1881800704, 1882034175,
+STORE, 1882034176, 1882193919,
+STORE, 1882193920, 1882406911,
+STORE, 1882406912, 1882451967,
+STORE, 1882451968, 1882996735,
+STORE, 1882996736, 1885892607,
+STORE, 1885892608, 1885896703,
+STORE, 1885896704, 1885904895,
+STORE, 1885904896, 1885908991,
+STORE, 1885908992, 1885913087,
+STORE, 1885913088, 1885966335,
+STORE, 1885966336, 1886232575,
+STORE, 1886232576, 1886236671,
+STORE, 1886236672, 1886240767,
+STORE, 1886240768, 1886244863,
+STORE, 1886244864, 1886248959,
+STORE, 1886248960, 1886294015,
+STORE, 1886294016, 1886494719,
+STORE, 1886494720, 1886498815,
+STORE, 1886498816, 1886502911,
+STORE, 1886502912, 1886507007,
+STORE, 1886507008, 1886511103,
+STORE, 1886511104, 1886556159,
+STORE, 1886556160, 1886629887,
+STORE, 1886629888, 1886633983,
+STORE, 1886633984, 1886638079,
+STORE, 1886638080, 1886642175,
+STORE, 1886642176, 1886646271,
+STORE, 1886646272, 1886666751,
+STORE, 1886666752, 1886670847,
+STORE, 1886670848, 1886674943,
+STORE, 1886674944, 1886679039,
+STORE, 1886679040, 1895419903,
+STORE, 1895419904, 1895550975,
+STORE, 1895550976, 1896148991,
+STORE, 1896148992, 1897189375,
+STORE, 1897189376, 1897701375,
+STORE, 1897701376, 1897803775,
+STORE, 1897803776, 1897816063,
+STORE, 1897816064, 1899913215,
+STORE, 1899913216, 1909379071,
+STORE, 1909379072, 1909387263,
+STORE, 1909387264, 1909391359,
+STORE, 1909391360, 1909432319,
+STORE, 1909432320, 1909436415,
+STORE, 1909436416, 1909440511,
+STORE, 1909440512, 1909460991,
+STORE, 1909460992, 1909547007,
+STORE, 1909547008, 1909551103,
+STORE, 1909551104, 1909555199,
+STORE, 1909555200, 1909559295,
+STORE, 1909559296, 1909563391,
+STORE, 1909563392, 1909739519,
+STORE, 1909739520, 1910566911,
+STORE, 1910566912, 1910571007,
+STORE, 1910571008, 1910575103,
+STORE, 1910575104, 1910579199,
+STORE, 1910579200, 1910583295,
+STORE, 1910583296, 1910587391,
+STORE, 1910587392, 1910620159,
+STORE, 1910620160, 1910624255,
+STORE, 1910624256, 1910628351,
+STORE, 1910628352, 1910632447,
+STORE, 1910632448, 1910652927,
+STORE, 1910652928, 1910657023,
+STORE, 1910657024, 1910661119,
+STORE, 1910661120, 1910665215,
+STORE, 1910665216, 1910669311,
+STORE, 1910669312, 1910677503,
+STORE, 1910677504, 1910681599,
+STORE, 1910681600, 1910685695,
+STORE, 1910685696, 1910689791,
+STORE, 1910689792, 1910697983,
+STORE, 1910697984, 1910702079,
+STORE, 1910702080, 1910706175,
+STORE, 1910706176, 1910710271,
+STORE, 1910710272, 1914093567,
+STORE, 1914093568, 1914097663,
+STORE, 1914097664, 1969434623,
+STORE, 1969434624, 1977819135,
+STORE, 3290435584, 3426750463,
+STORE, 3426750464, 3426754559,
+STORE, 3426754560, 3426762751,
+STORE, 3426762752, 3426766847,
+STORE, 3426766848, 3426770943,
+STORE, 3427037184, 3427061759,
+STORE, 3427061760, 3427135487,
+STORE, 3427135488, 3427143679,
+STORE, 3427143680, 3427147775,
+STORE, 3427147776, 3427209215,
+STORE, 3427319808, 3432116223,
+STORE, 3432116224, 3450130431,
+STORE, 3450130432, 3451027455,
+STORE, 3451027456, 3451031551,
+STORE, 3451031552, 3451461631,
+STORE, 3451736064, 3456688127,
+STORE, 3456688128, 3475222527,
+STORE, 3475222528, 3476119551,
+STORE, 3476119552, 3476127743,
+STORE, 3476127744, 3476553727,
+STORE, 3476631552, 3477315583,
+STORE, 3477315584, 3479949311,
+STORE, 3479949312, 3480002559,
+STORE, 3480002560, 3480006655,
+STORE, 3480006656, 3480432639,
+STORE, 3480539136, 3480543231,
+STORE, 3480543232, 3480547327,
+STORE, 3480547328, 3480555519,
+STORE, 3480854528, 3480903679,
+STORE, 3480903680, 3480969215,
+STORE, 3480969216, 3480977407,
+STORE, 3480977408, 3480981503,
+STORE, 3481030656, 3481092095,
+STORE, 3481092096, 3481235455,
+STORE, 3481235456, 3481243647,
+STORE, 3481243648, 3481247743,
+STORE, 3481436160, 3481444351,
+STORE, 3481444352, 3481456639,
+STORE, 3481456640, 3481460735,
+STORE, 3481460736, 3481464831,
+STORE, 3481587712, 3481645055,
+STORE, 3481645056, 3481772031,
+STORE, 3481772032, 3481776127,
+STORE, 3481776128, 3481780223,
+STORE, 3481874432, 3481935871,
+STORE, 3481935872, 3482030079,
+STORE, 3482030080, 3482038271,
+STORE, 3482038272, 3482042367,
+STORE, 3482198016, 3482230783,
+STORE, 3482230784, 3482271743,
+STORE, 3482271744, 3482279935,
+STORE, 3482279936, 3482284031,
+STORE, 3482562560, 3482566655,
+STORE, 3482566656, 3482570751,
+STORE, 3482570752, 3482574847,
+STORE, 3482636288, 3482689535,
+STORE, 3482689536, 3482746879,
+STORE, 3482746880, 3482755071,
+STORE, 3482755072, 3482759167,
+STORE, 3482972160, 3483062271,
+STORE, 3483062272, 3483242495,
+STORE, 3483242496, 3483246591,
+STORE, 3483246592, 3483250687,
+STORE, 3483398144, 3483688959,
+STORE, 3483688960, 3484114943,
+STORE, 3484114944, 3484131327,
+STORE, 3484131328, 3484135423,
+STORE, 3484135424, 3484143615,
+STORE, 3484184576, 3484475391,
+STORE, 3484475392, 3485028351,
+STORE, 3485028352, 3485057023,
+STORE, 3485057024, 3485061119,
+STORE, 3485360128, 3485364223,
+STORE, 3485364224, 3485368319,
+STORE, 3485368320, 3485372415,
+STORE, 3485589504, 3485593599,
+STORE, 3485593600, 3485597695,
+STORE, 3485597696, 3485601791,
+STORE, 3485913088, 3485937663,
+STORE, 3485937664, 3485974527,
+STORE, 3485974528, 3485982719,
+STORE, 3485982720, 3485986815,
+STORE, 3486052352, 3486056447,
+STORE, 3486056448, 3486064639,
+STORE, 3486064640, 3486068735,
+STORE, 3486068736, 3486072831,
+STORE, 3486294016, 3486302207,
+STORE, 3486302208, 3486306303,
+STORE, 3486306304, 3486310399,
+STORE, 3486310400, 3486314495,
+STORE, 3486670848, 3486679039,
+STORE, 3486679040, 3486683135,
+STORE, 3486683136, 3486687231,
+STORE, 3486687232, 3486691327,
+STORE, 3486863360, 3486871551,
+STORE, 3486871552, 3486875647,
+STORE, 3486875648, 3486879743,
+STORE, 3486879744, 3486883839,
+STORE, 3487584256, 3522543615,
+STORE, 3522543616, 3523321855,
+STORE, 3523321856, 3523342335,
+STORE, 3523342336, 3523387391,
+STORE, 3523387392, 3523391487,
+STORE, 3523391488, 3523395583,
+STORE, 3523477504, 3523686399,
+STORE, 3523686400, 3523981311,
+STORE, 3523981312, 3523997695,
+STORE, 3523997696, 3524001791,
+STORE, 3524177920, 3525013503,
+STORE, 3525013504, 3526582271,
+STORE, 3526582272, 3526606847,
+STORE, 3526606848, 3526610943,
+STORE, 3526610944, 3526615039,
+STORE, 3526672384, 3526746111,
+STORE, 3526746112, 3526860799,
+STORE, 3526860800, 3526868991,
+STORE, 3526868992, 3526873087,
+STORE, 3527000064, 3527475199,
+STORE, 3527475200, 3527479295,
+STORE, 3527479296, 3527573503,
+STORE, 3527573504, 3527581695,
+STORE, 3527581696, 3527585791,
+STORE, 3527585792, 3527606271,
+STORE, 3527909376, 3527913471,
+STORE, 3527913472, 3527917567,
+STORE, 3527917568, 3527921663,
+STORE, 3527950336, 3528011775,
+STORE, 3528011776, 3528093695,
+STORE, 3528093696, 3528101887,
+STORE, 3528101888, 3528105983,
+STORE, 3528228864, 3528241151,
+STORE, 3528241152, 3528261631,
+STORE, 3528261632, 3528265727,
+STORE, 3528273920, 3528593407,
+STORE, 3528593408, 3528609791,
+STORE, 3528609792, 3528638463,
+STORE, 3528638464, 3528642559,
+STORE, 3528642560, 3528646655,
+STORE, 3528880128, 3528912895,
+STORE, 3528912896, 3528962047,
+STORE, 3528962048, 3528966143,
+STORE, 3528966144, 3528970239,
+STORE, 3528982528, 3530293247,
+STORE, 3530366976, 3530825727,
+STORE, 3530825728, 3531317247,
+STORE, 3531317248, 3541041151,
+STORE, 3541041152, 3541303295,
+STORE, 3541430272, 3566206975,
+STORE, 3566206976, 3566993407,
+STORE, 3567239168, 3587571711,
+STORE, 3587571712, 3588284415,
+STORE, 3588284416, 3588661247,
+STORE, 3588661248, 3589066751,
+STORE, 3589066752, 3589574655,
+STORE, 3589574656, 3590078463,
+STORE, 3590078464, 3590373375,
+STORE, 3590373376, 3590668287,
+STORE, 3590668288, 3590963199,
+STORE, 3590963200, 3591294975,
+STORE, 3591294976, 3591602175,
+STORE, 3591602176, 3591933951,
+STORE, 3591933952, 3592241151,
+STORE, 3592241152, 3592572927,
+STORE, 3592572928, 3592876031,
+STORE, 3592876032, 3593211903,
+STORE, 3593211904, 3593547775,
+STORE, 3593547776, 3593650175,
+STORE, 3593650176, 3593928703,
+STORE, 3593928704, 3593936895,
+STORE, 3593936896, 3593940991,
+STORE, 3594006528, 3594301439,
+STORE, 3594301440, 3594739711,
+STORE, 3594739712, 3594756095,
+STORE, 3594756096, 3594760191,
+STORE, 3594760192, 3594768383,
+STORE, 3594952704, 3595051007,
+STORE, 3595051008, 3595223039,
+STORE, 3595223040, 3595227135,
+STORE, 3595227136, 3595235327,
+STORE, 3595431936, 3595775999,
+STORE, 3595776000, 3596701695,
+STORE, 3596701696, 3596742655,
+STORE, 3596742656, 3596746751,
+STORE, 3596746752, 3596750847,
+STORE, 3596767232, 3597070335,
+STORE, 3597070336, 3597402111,
+STORE, 3597402112, 3598188543,
+STORE, 3598262272, 3623428095,
+STORE, 3623428096, 3623432191,
+STORE, 3623432192, 3623436287,
+STORE, 3623436288, 3623440383,
+STORE, 3623616512, 3623878655,
+STORE, 3624169472, 3624300543,
+STORE, 3627524096, 3628523519,
+STORE, 3628523520, 3629522943,
+STORE, 3696631808, 3730186239,
+STORE, 3730186240, 3763740671,
+STORE, 3763740672, 3764027391,
+STORE, 3764027392, 3765133311,
+STORE, 3765133312, 3765145599,
+STORE, 3765145600, 3765149695,
+STORE, 3765178368, 3766022143,
+STORE, 3766022144, 3768791039,
+STORE, 3768791040, 3768840191,
+STORE, 3768840192, 3768844287,
+STORE, 3768897536, 3768913919,
+STORE, 3768913920, 3768934399,
+STORE, 3768934400, 3768938495,
+STORE, 3769016320, 3769147391,
+STORE, 3769147392, 3769233407,
+STORE, 3769233408, 3769356287,
+STORE, 3769356288, 3769360383,
+STORE, 3769360384, 3769368575,
+STORE, 3769376768, 3794542591,
+STORE, 3794542592, 3794599935,
+STORE, 3794599936, 3794731007,
+STORE, 3794731008, 3794735103,
+STORE, 3794735104, 3794743295,
+STORE, 3794849792, 3794980863,
+STORE, 3794980864, 3794984959,
+STORE, 3794984960, 3794989055,
+STORE, 3794989056, 3794993151,
+STORE, 3794993152, 3794997247,
+STORE, 3795103744, 3795128319,
+STORE, 3795128320, 3795165183,
+STORE, 3795165184, 3795169279,
+STORE, 3795169280, 3795173375,
+STORE, 3795210240, 3795357695,
+STORE, 3795357696, 3795365887,
+STORE, 3795365888, 3795374079,
+STORE, 3795374080, 3795378175,
+STORE, 3795378176, 3795382271,
+STORE, 3795406848, 3795738623,
+STORE, 3795738624, 3795742719,
+STORE, 3795742720, 3795755007,
+STORE, 3795755008, 3795759103,
+STORE, 3795763200, 3795894271,
+STORE, 3795894272, 3796041727,
+STORE, 3796041728, 3796054015,
+STORE, 3796054016, 3796066303,
+STORE, 3796066304, 3796070399,
+STORE, 3796176896, 3796205567,
+STORE, 3796205568, 3796250623,
+STORE, 3796250624, 3796254719,
+STORE, 3796254720, 3796258815,
+STORE, 3796262912, 3796393983,
+STORE, 3796393984, 3796516863,
+STORE, 3796516864, 3796873215,
+STORE, 3796873216, 3796885503,
+STORE, 3796885504, 3796889599,
+STORE, 3796963328, 3796967423,
+STORE, 3796967424, 3796975615,
+STORE, 3796975616, 3796979711,
+STORE, 3797000192, 3797307391,
+STORE, 3797307392, 3797311487,
+STORE, 3797311488, 3797315583,
+STORE, 3797315584, 3797323775,
+STORE, 3797327872, 3797450751,
+STORE, 3797450752, 3797458943,
+STORE, 3797458944, 3797471231,
+STORE, 3797471232, 3797475327,
+STORE, 3797577728, 3797700607,
+STORE, 3797700608, 3797721087,
+STORE, 3797721088, 3797733375,
+STORE, 3797733376, 3797741567,
+STORE, 3797741568, 3797864447,
+STORE, 3797864448, 3797995519,
+STORE, 3797995520, 3798048767,
+STORE, 3798048768, 3798179839,
+STORE, 3798179840, 3798188031,
+STORE, 3798188032, 3798192127,
+STORE, 3798290432, 3798302719,
+STORE, 3798302720, 3798323199,
+STORE, 3798323200, 3798327295,
+STORE, 3798327296, 3798331391,
+STORE, 3798429696, 3798433791,
+STORE, 3798433792, 3798552575,
+STORE, 3798552576, 3798556671,
+STORE, 3798556672, 3798568959,
+STORE, 3798568960, 3798573055,
+STORE, 3798573056, 3798581247,
+STORE, 3798618112, 3798749183,
+STORE, 3798749184, 3798855679,
+STORE, 3798855680, 3798966271,
+STORE, 3798966272, 3798982655,
+STORE, 3798982656, 3798986751,
+STORE, 3799101440, 3799171071,
+STORE, 3799171072, 3799240703,
+STORE, 3799240704, 3799248895,
+STORE, 3799248896, 3799252991,
+STORE, 3799326720, 3799650303,
+STORE, 3799650304, 3800629247,
+STORE, 3800629248, 3800641535,
+STORE, 3800641536, 3800645631,
+STORE, 3800645632, 3800649727,
+STORE, 3800649728, 3800903679,
+STORE, 3800903680, 3800936447,
+STORE, 3800936448, 3800969215,
+STORE, 3800969216, 3800981503,
+STORE, 3800981504, 3800985599,
+STORE, 3801001984, 3801133055,
+STORE, 3801133056, 3801202687,
+STORE, 3801202688, 3801591807,
+STORE, 3801591808, 3801599999,
+STORE, 3801600000, 3801604095,
+STORE, 3801604096, 3801608191,
+STORE, 3801608192, 3801739263,
+STORE, 3801739264, 3801755647,
+STORE, 3801755648, 3801796607,
+STORE, 3801796608, 3801804799,
+STORE, 3801804800, 3801808895,
+STORE, 3801878528, 3801944063,
+STORE, 3801944064, 3802116095,
+STORE, 3802116096, 3802124287,
+STORE, 3802124288, 3802128383,
+STORE, 3802136576, 3803447295,
+STORE, 3803492352, 3803553791,
+STORE, 3803553792, 3804233727,
+STORE, 3804233728, 3806068735,
+STORE, 3806121984, 3806253055,
+STORE, 3806253056, 3806674943,
+STORE, 3806674944, 3807117311,
+STORE, 3807117312, 3807379455,
+STORE, 3807379456, 3807432703,
+STORE, 3807432704, 3807563775,
+STORE, 3807563776, 3809202175,
+STORE, 3809202176, 3810250751,
+STORE, 3810250752, 3827027967,
+STORE, 3827027968, 3829125119,
+STORE, 3829125120, 3837513727,
+STORE, 3837513728, 3839610879,
+STORE, 3839610880, 3847999487,
+STORE, 3847999488, 3856392191,
+STORE, 3856392192, 3864784895,
+STORE, 3864784896, 3868983295,
+STORE, 3868983296, 3885760511,
+STORE, 3885760512, 3886809087,
+STORE, 3886809088, 3887857663,
+STORE, 3887857664, 3888119807,
+STORE, 3888144384, 3888148479,
+STORE, 3888148480, 3888218111,
+STORE, 3888218112, 3888222207,
+STORE, 3888222208, 3888353279,
+STORE, 3888353280, 3889172479,
+STORE, 3889172480, 3892314111,
+STORE, 3892314112, 3892576255,
+STORE, 3892588544, 3892637695,
+STORE, 3892637696, 3892686847,
+STORE, 3892686848, 3892744191,
+STORE, 3892748288, 3892785151,
+STORE, 3892785152, 3895459839,
+STORE, 3895459840, 3895721983,
+STORE, 3895738368, 3895885823,
+STORE, 3895885824, 3897081855,
+STORE, 3897081856, 3906482175,
+STORE, 3906482176, 3916144639,
+STORE, 3916144640, 3925766143,
+STORE, 3925766144, 3926974463,
+STORE, 3926974464, 3928367103,
+STORE, 3928367104, 3928911871,
+STORE, 3928911872, 3933995007,
+STORE, 3933995008, 3935830015,
+STORE, 3935830016, 3935846399,
+STORE, 3935879168, 3936010239,
+STORE, 3936010240, 3936026623,
+STORE, 3936026624, 3936034815,
+STORE, 3936034816, 3936051199,
+STORE, 3936051200, 3936055295,
+STORE, 3936071680, 3936137215,
+STORE, 3936137216, 3936202751,
+STORE, 3936202752, 3936219135,
+STORE, 3936235520, 3936251903,
+STORE, 3936268288, 3936276479,
+STORE, 3936276480, 3936284671,
+STORE, 3936284672, 3936288767,
+STORE, 3936288768, 3936292863,
+STORE, 3936296960, 3936354303,
+STORE, 3936354304, 3936616447,
+STORE, 3936628736, 3936669695,
+STORE, 3936669696, 3936747519,
+STORE, 3936747520, 3936870399,
+STORE, 3936870400, 3936874495,
+STORE, 3936874496, 3936878591,
+STORE, 3936882688, 3936903167,
+STORE, 3936911360, 3936948223,
+STORE, 3936948224, 3936964607,
+STORE, 3936964608, 3937103871,
+STORE, 3937103872, 3937107967,
+STORE, 3937132544, 3937161215,
+STORE, 3937189888, 3937255423,
+STORE, 3937255424, 3938512895,
+STORE, 3938512896, 3945435135,
+STORE, 3945435136, 3945476095,
+STORE, 3945476096, 3945484287,
+STORE, 3945484288, 3945496575,
+STORE, 3945500672, 3945541631,
+STORE, 3945558016, 3945566207,
+STORE, 3945566208, 3945594879,
+STORE, 3945594880, 3945598975,
+STORE, 3945598976, 3945603071,
+STORE, 3945611264, 3945742335,
+STORE, 3945742336, 3945844735,
+STORE, 3945844736, 3945848831,
+STORE, 3945848832, 3945861119,
+STORE, 3945861120, 3945865215,
+STORE, 3945869312, 3945897983,
+STORE, 3945897984, 3946303487,
+STORE, 3946303488, 3946397695,
+STORE, 3946397696, 3946569727,
+STORE, 3946569728, 3946573823,
+STORE, 3946573824, 3946594303,
+STORE, 3946594304, 3946663935,
+STORE, 3946663936, 3946708991,
+STORE, 3946708992, 3946823679,
+STORE, 3946823680, 3946827775,
+STORE, 3946827776, 3946831871,
+STORE, 3946831872, 3946860543,
+STORE, 3946893312, 3946897407,
+STORE, 3946897408, 3946905599,
+STORE, 3946905600, 3946909695,
+STORE, 3946909696, 3946913791,
+STORE, 3946913792, 3946930175,
+STORE, 3946930176, 3946967039,
+STORE, 3946967040, 3947102207,
+STORE, 3947102208, 3948412927,
+STORE, 3948441600, 3948556287,
+STORE, 3948556288, 3948576767,
+STORE, 3948576768, 3948597247,
+STORE, 3948597248, 3948605439,
+STORE, 3948605440, 3948609535,
+STORE, 3948609536, 3948654591,
+STORE, 3948654592, 3948781567,
+STORE, 3948781568, 3948822527,
+STORE, 3948822528, 3948904447,
+STORE, 3948904448, 3948908543,
+STORE, 3948908544, 3948912639,
+STORE, 3948945408, 3949043711,
+STORE, 3949043712, 3949174783,
+STORE, 3949174784, 3949191167,
+STORE, 3949191168, 3949195263,
+STORE, 3949207552, 3949252607,
+STORE, 3949252608, 3949256703,
+STORE, 3949256704, 3949363199,
+STORE, 3949363200, 3949367295,
+STORE, 3949367296, 3949379583,
+STORE, 3949379584, 3949383679,
+STORE, 3949383680, 3949400063,
+STORE, 3949400064, 3949404159,
+STORE, 3949416448, 3949481983,
+STORE, 3949481984, 3949486079,
+STORE, 3949486080, 3949592575,
+STORE, 3949592576, 3949596671,
+STORE, 3949596672, 3949621247,
+STORE, 3949621248, 3949662207,
+STORE, 3949662208, 3949666303,
+STORE, 3949694976, 3949727743,
+STORE, 3949727744, 3949731839,
+STORE, 3949731840, 3949838335,
+STORE, 3949838336, 3949842431,
+STORE, 3949842432, 3949846527,
+STORE, 3949846528, 3949854719,
+STORE, 3949854720, 3949858815,
+STORE, 3949858816, 3949862911,
+STORE, 3949867008, 3949891583,
+STORE, 3949891584, 3949928447,
+STORE, 3949928448, 3949993983,
+STORE, 3949993984, 3950043135,
+STORE, 3950043136, 3950059519,
+STORE, 3950059520, 3950096383,
+STORE, 3950096384, 3950100479,
+STORE, 3950100480, 3950104575,
+STORE, 3950104576, 3950157823,
+STORE, 3950157824, 3950292991,
+STORE, 3950292992, 3950346239,
+STORE, 3950346240, 3950477311,
+STORE, 3950477312, 3950485503,
+STORE, 3950485504, 3950489599,
+STORE, 3950493696, 3950510079,
+STORE, 3950510080, 3950661631,
+STORE, 3950661632, 3951005695,
+STORE, 3951005696, 3951026175,
+STORE, 3951026176, 3951030271,
+STORE, 3951030272, 3951054847,
+STORE, 3951054848, 3951116287,
+STORE, 3951116288, 3951144959,
+STORE, 3951144960, 3951149055,
+STORE, 3951149056, 3951194111,
+STORE, 3951194112, 3951202303,
+STORE, 3951202304, 3951206399,
+STORE, 3951210496, 3951226879,
+STORE, 3951226880, 3951329279,
+STORE, 3951329280, 3951366143,
+STORE, 3951366144, 3951411199,
+STORE, 3951411200, 3951415295,
+STORE, 3951415296, 3951419391,
+STORE, 3951419392, 3951452159,
+STORE, 3951452160, 3951566847,
+STORE, 3951566848, 3951812607,
+STORE, 3951812608, 3952173055,
+STORE, 3952173056, 3952214015,
+STORE, 3952214016, 3952218111,
+STORE, 3952222208, 3952250879,
+STORE, 3952250880, 3952369663,
+STORE, 3952369664, 3952488447,
+STORE, 3952488448, 3952627711,
+STORE, 3952627712, 3952635903,
+STORE, 3952635904, 3952639999,
+STORE, 3952652288, 3952668671,
+STORE, 3952668672, 3953000447,
+STORE, 3953000448, 3953004543,
+STORE, 3953004544, 3953008639,
+STORE, 3953008640, 3953012735,
+STORE, 3953012736, 3953037311,
+STORE, 3953037312, 3953151999,
+STORE, 3953152000, 3953291263,
+STORE, 3953291264, 3953324031,
+STORE, 3953324032, 3953364991,
+STORE, 3953364992, 3953373183,
+STORE, 3953373184, 3953377279,
+STORE, 3953381376, 3953410047,
+STORE, 3953410048, 3953491967,
+STORE, 3953491968, 3953643519,
+STORE, 3953643520, 3953651711,
+STORE, 3953651712, 3953655807,
+STORE, 3953659904, 3953766399,
+STORE, 3953766400, 3953774591,
+STORE, 3953774592, 3953786879,
+STORE, 3953786880, 3953790975,
+STORE, 3953790976, 3953823743,
+STORE, 3953823744, 3953963007,
+STORE, 3953963008, 3954024447,
+STORE, 3954024448, 3954118655,
+STORE, 3954118656, 3954122751,
+STORE, 3954122752, 3954126847,
+STORE, 3954130944, 3954184191,
+STORE, 3954184192, 3954294783,
+STORE, 3954294784, 3954323455,
+STORE, 3954323456, 3954393087,
+STORE, 3954393088, 3954397183,
+STORE, 3954397184, 3954401279,
+STORE, 3954401280, 3954405375,
+STORE, 3954409472, 3954528255,
+STORE, 3954528256, 3954737151,
+STORE, 3954737152, 3955052543,
+STORE, 3955052544, 3955060735,
+STORE, 3955060736, 3955064831,
+STORE, 3955068928, 3955105791,
+STORE, 3955105792, 3955167231,
+STORE, 3955167232, 3955277823,
+STORE, 3955277824, 3955310591,
+STORE, 3955310592, 3955351551,
+STORE, 3955351552, 3955359743,
+STORE, 3955359744, 3955363839,
+STORE, 3955363840, 3955392511,
+STORE, 3955392512, 3955453951,
+STORE, 3955453952, 3955601407,
+STORE, 3955601408, 3955777535,
+STORE, 3955777536, 3955982335,
+STORE, 3955982336, 3956011007,
+STORE, 3956011008, 3956015103,
+STORE, 3956023296, 3956039679,
+STORE, 3956039680, 3956125695,
+STORE, 3956125696, 3956129791,
+STORE, 3956129792, 3956133887,
+STORE, 3956133888, 3956137983,
+STORE, 3956142080, 3956449279,
+STORE, 3956449280, 3956543487,
+STORE, 3956543488, 3956719615,
+STORE, 3956719616, 3956731903,
+STORE, 3956731904, 3956735999,
+STORE, 3956744192, 3956793343,
+STORE, 3956793344, 3956887551,
+STORE, 3956887552, 3956953087,
+STORE, 3956953088, 3957035007,
+STORE, 3957035008, 3957039103,
+STORE, 3957039104, 3957047295,
+STORE, 3957047296, 3957071871,
+STORE, 3957071872, 3957231615,
+STORE, 3957231616, 3957563391,
+STORE, 3957563392, 3957579775,
+STORE, 3957579776, 3957583871,
+STORE, 3957592064, 3957608447,
+STORE, 3957608448, 3957878783,
+STORE, 3957878784, 3958591487,
+STORE, 3958591488, 3958599679,
+STORE, 3958599680, 3958607871,
+STORE, 3958607872, 3958620159,
+STORE, 3958620160, 3958624255,
+STORE, 3958624256, 3963199487,
+STORE, 3963199488, 3963285503,
+STORE, 3963285504, 3963371519,
+STORE, 3963371520, 3963428863,
+STORE, 3963428864, 3963555839,
+STORE, 3963555840, 3963559935,
+STORE, 3963559936, 3963564031,
+STORE, 3963568128, 3963596799,
+STORE, 3963596800, 3963682815,
+STORE, 3963682816, 3963695103,
+STORE, 3963695104, 3963711487,
+STORE, 3963711488, 3963715583,
+STORE, 3963719680, 3963752447,
+STORE, 3963752448, 3963846655,
+STORE, 3963846656, 3963932671,
+STORE, 3963932672, 3964444671,
+STORE, 3964444672, 3964448767,
+STORE, 3964448768, 3965808639,
+STORE, 3965808640, 3965845503,
+STORE, 3965845504, 3965849599,
+STORE, 3965853696, 3965935615,
+STORE, 3965935616, 3966017535,
+STORE, 3966017536, 3966103551,
+STORE, 3966103552, 3966685183,
+STORE, 3966685184, 3967705087,
+STORE, 3967705088, 3967758335,
+STORE, 3967758336, 3967762431,
+STORE, 3967762432, 3967770623,
+STORE, 3967770624, 3967799295,
+STORE, 3967799296, 3967848447,
+STORE, 3967848448, 3967868927,
+STORE, 3967868928, 3967901695,
+STORE, 3967901696, 3967905791,
+STORE, 3967905792, 3967909887,
+STORE, 3967909888, 3967995903,
+STORE, 3967995904, 3968077823,
+STORE, 3968077824, 3968159743,
+STORE, 3968159744, 3968167935,
+STORE, 3968167936, 3968172031,
+STORE, 3968172032, 3968192511,
+STORE, 3968192512, 3968196607,
+STORE, 3968196608, 3968200703,
+STORE, 3968208896, 3968516095,
+STORE, 3968516096, 3968528383,
+STORE, 3968528384, 3968552959,
+STORE, 3968552960, 3968557055,
+STORE, 3968561152, 3968593919,
+STORE, 3968593920, 3968626687,
+STORE, 3968626688, 3971153919,
+STORE, 3971153920, 3973754879,
+STORE, 3973754880, 3973804031,
+STORE, 3973804032, 3973820415,
+STORE, 3973820416, 3973832703,
+STORE, 3973840896, 3973873663,
+STORE, 3973873664, 3973967871,
+STORE, 3973967872, 3973976063,
+STORE, 3973976064, 3973984255,
+STORE, 3973984256, 3973988351,
+STORE, 3973988352, 3973992447,
+STORE, 3973996544, 3974008831,
+STORE, 3974008832, 3974045695,
+STORE, 3974045696, 3974139903,
+STORE, 3974139904, 3974254591,
+STORE, 3974254592, 3974275071,
+STORE, 3974275072, 3974291455,
+STORE, 3974291456, 3974295551,
+STORE, 3974295552, 3974373375,
+STORE, 3974373376, 3974524927,
+STORE, 3974524928, 3974529023,
+STORE, 3974529024, 3974537215,
+STORE, 3974537216, 3974541311,
+STORE, 3974541312, 3974545407,
+STORE, 3974545408, 3974627327,
+STORE, 3974627328, 3974680575,
+STORE, 3974680576, 3974811647,
+STORE, 3974811648, 3974819839,
+STORE, 3974819840, 3974823935,
+STORE, 3974832128, 3974918143,
+STORE, 3974918144, 3974963199,
+STORE, 3974963200, 3975077887,
+STORE, 3975077888, 3975090175,
+STORE, 3975090176, 3975094271,
+STORE, 3975094272, 3975102463,
+STORE, 3975102464, 3975114751,
+STORE, 3975114752, 3975266303,
+STORE, 3975266304, 3975274495,
+STORE, 3975274496, 3975286783,
+STORE, 3975286784, 3975290879,
+STORE, 3975290880, 3975299071,
+STORE, 3975299072, 3975315455,
+STORE, 3975315456, 3975430143,
+STORE, 3975430144, 3975536639,
+STORE, 3975536640, 3975651327,
+STORE, 3975651328, 3975655423,
+STORE, 3975655424, 3975659519,
+STORE, 3975659520, 3975770111,
+STORE, 3975770112, 3975778303,
+STORE, 3975778304, 3975790591,
+STORE, 3975790592, 3975794687,
+STORE, 3975794688, 3975798783,
+STORE, 3975798784, 3975831551,
+STORE, 3975831552, 3975872511,
+STORE, 3975872512, 3975987199,
+STORE, 3975987200, 3976134655,
+STORE, 3976134656, 3977175039,
+STORE, 3977175040, 3977183231,
+STORE, 3977183232, 3977191423,
+STORE, 3977191424, 3977195519,
+STORE, 3977199616, 3977248767,
+STORE, 3977248768, 3977539583,
+STORE, 3977539584, 3977965567,
+STORE, 3977965568, 3977981951,
+STORE, 3977981952, 3977986047,
+STORE, 3977986048, 3977994239,
+STORE, 3977994240, 3978002431,
+STORE, 3978002432, 3978084351,
+STORE, 3978084352, 3978125311,
+STORE, 3978125312, 3978174463,
+STORE, 3978174464, 3978178559,
+STORE, 3978178560, 3978182655,
+STORE, 3978182656, 3978207231,
+STORE, 3978207232, 3978297343,
+STORE, 3978297344, 3978301439,
+STORE, 3978301440, 3978305535,
+STORE, 3978305536, 3978309631,
+STORE, 3978309632, 3978317823,
+STORE, 3978317824, 3978625023,
+STORE, 3978625024, 3978657791,
+STORE, 3978657792, 3978727423,
+STORE, 3978727424, 3978735615,
+STORE, 3978735616, 3978739711,
+STORE, 3978739712, 3978760191,
+STORE, 3978760192, 3978842111,
+STORE, 3978842112, 3978850303,
+STORE, 3978850304, 3978858495,
+STORE, 3978858496, 3978862591,
+STORE, 3978862592, 3978895359,
+STORE, 3978895360, 3979014143,
+STORE, 3979014144, 3979132927,
+STORE, 3979132928, 3979288575,
+STORE, 3979288576, 3979481087,
+STORE, 3979481088, 3979489279,
+STORE, 3979489280, 3979493375,
+STORE, 3979497472, 3979583487,
+STORE, 3979583488, 3979673599,
+STORE, 3979673600, 3979718655,
+STORE, 3979718656, 3979829247,
+STORE, 3979829248, 3979841535,
+STORE, 3979841536, 3979882495,
+STORE, 3979882496, 3979964415,
+STORE, 3979964416, 3980013567,
+STORE, 3980013568, 3980148735,
+STORE, 3980148736, 3980152831,
+STORE, 3980152832, 3980320767,
+STORE, 3980320768, 3980337151,
+STORE, 3980337152, 3980341247,
+STORE, 3980345344, 3980365823,
+STORE, 3980365824, 3980423167,
+STORE, 3980423168, 3980460031,
+STORE, 3980460032, 3980500991,
+STORE, 3980500992, 3980509183,
+STORE, 3980509184, 3980513279,
+STORE, 3980513280, 3980546047,
+STORE, 3980546048, 3980660735,
+STORE, 3980660736, 3980951551,
+STORE, 3980951552, 3981500415,
+STORE, 3981500416, 3981529087,
+STORE, 3981529088, 3981533183,
+STORE, 3981537280, 3981549567,
+STORE, 3981549568, 3981598719,
+STORE, 3981598720, 3981717503,
+STORE, 3981717504, 3982127103,
+STORE, 3982127104, 3982675967,
+STORE, 3982675968, 3982733311,
+STORE, 3982733312, 3982737407,
+STORE, 3982741504, 3982860287,
+STORE, 3982860288, 3982905343,
+STORE, 3982905344, 3982966783,
+STORE, 3982966784, 3982974975,
+STORE, 3982974976, 3982979071,
+STORE, 3982979072, 3983032319,
+STORE, 3983032320, 3983085567,
+STORE, 3983085568, 3983208447,
+STORE, 3983208448, 3983212543,
+STORE, 3983212544, 3983220735,
+STORE, 3983220736, 3983224831,
+STORE, 3983224832, 3983237119,
+STORE, 3983237120, 3983351807,
+STORE, 3983351808, 3983376383,
+STORE, 3983376384, 3983392767,
+STORE, 3983392768, 3983396863,
+STORE, 3983396864, 3983400959,
+STORE, 3983400960, 3983417343,
+STORE, 3983417344, 3983753215,
+STORE, 3983753216, 3983757311,
+STORE, 3983757312, 3983761407,
+STORE, 3983761408, 3983765503,
+STORE, 3983765504, 3983769599,
+STORE, 3983769600, 3983880191,
+STORE, 3983880192, 3983892479,
+STORE, 3983892480, 3983900671,
+STORE, 3983900672, 3983904767,
+STORE, 3983904768, 3983908863,
+STORE, 3983908864, 3983941631,
+STORE, 3983941632, 3983990783,
+STORE, 3983990784, 3984097279,
+STORE, 3984097280, 3984105471,
+STORE, 3984105472, 3984117759,
+STORE, 3984117760, 3984121855,
+STORE, 3984121856, 3984125951,
+STORE, 3984125952, 3984134143,
+STORE, 3984134144, 3984150527,
+STORE, 3984150528, 3984416767,
+STORE, 3984416768, 3984470015,
+STORE, 3984470016, 3984564223,
+STORE, 3984564224, 3984568319,
+STORE, 3984572416, 3984629759,
+STORE, 3984629760, 3984805887,
+STORE, 3984805888, 3985096703,
+STORE, 3985096704, 3985104895,
+STORE, 3985104896, 3985108991,
+STORE, 3985113088, 3986862079,
+STORE, 3986862080, 3993640959,
+STORE, 3993640960, 3993739263,
+STORE, 3993739264, 3993743359,
+STORE, 3993743360, 3993759743,
+STORE, 3993759744, 3993780223,
+STORE, 3993780224, 3993784319,
+STORE, 3993784320, 3993792511,
+STORE, 3993792512, 3993796607,
+STORE, 3993796608, 3993800703,
+STORE, 3993804800, 3994214399,
+STORE, 3994214400, 3994218495,
+STORE, 3994218496, 3994222591,
+STORE, 3994222592, 3994226687,
+STORE, 3994230784, 3994243071,
+STORE, 3994243072, 3994255359,
+STORE, 3994255360, 3994304511,
+STORE, 3994304512, 3994386431,
+STORE, 3994386432, 3994509311,
+STORE, 3994509312, 3994521599,
+STORE, 3994521600, 3994525695,
+STORE, 3994529792, 3994542079,
+STORE, 3994542080, 3994660863,
+STORE, 3994660864, 3994705919,
+STORE, 3994705920, 3994796031,
+STORE, 3994796032, 3994800127,
+STORE, 3994800128, 3994804223,
+STORE, 3994804224, 3994812415,
+STORE, 3994812416, 3994845183,
+STORE, 3994845184, 3994898431,
+STORE, 3994898432, 3994902527,
+STORE, 3994902528, 3994906623,
+STORE, 3994910720, 3994931199,
+STORE, 3994931200, 3995181055,
+STORE, 3995181056, 3995222015,
+STORE, 3995222016, 3995275263,
+STORE, 3995275264, 3995279359,
+STORE, 3995279360, 3995283455,
+STORE, 3995283456, 3995291647,
+STORE, 3995291648, 3995324415,
+STORE, 3995324416, 3995451391,
+STORE, 3995451392, 3995697151,
+STORE, 3995697152, 3996078079,
+STORE, 3996078080, 3996086271,
+STORE, 3996086272, 3996090367,
+STORE, 3996094464, 3996119039,
+STORE, 3996119040, 3996200959,
+STORE, 3996200960, 3996229631,
+STORE, 3996229632, 3996233727,
+STORE, 3996233728, 3996282879,
+STORE, 3996282880, 3996291071,
+STORE, 3996291072, 3996295167,
+STORE, 3996299264, 3996311551,
+STORE, 3996311552, 3996430335,
+STORE, 3996430336, 3996467199,
+STORE, 3996467200, 3996504063,
+STORE, 3996504064, 3996512255,
+STORE, 3996512256, 3996516351,
+STORE, 3996516352, 3996540927,
+STORE, 3996540928, 3996671999,
+STORE, 3996672000, 3996676095,
+STORE, 3996676096, 3996684287,
+STORE, 3996684288, 3996688383,
+STORE, 3996688384, 3996692479,
+STORE, 3996692480, 3996717055,
+STORE, 3996717056, 3997048831,
+STORE, 3997048832, 3997057023,
+STORE, 3997057024, 3997073407,
+STORE, 3997073408, 3997077503,
+STORE, 3997077504, 3997081599,
+STORE, 3997081600, 3997097983,
+STORE, 3997097984, 3997179903,
+STORE, 3997179904, 3997356031,
+STORE, 3997356032, 3997650943,
+STORE, 3997650944, 3997675519,
+STORE, 3997675520, 3997679615,
+STORE, 3997683712, 3997700095,
+STORE, 3997700096, 3997745151,
+STORE, 3997745152, 3997802495,
+STORE, 3997802496, 3997810687,
+STORE, 3997810688, 3997814783,
+STORE, 3997814784, 3998064639,
+STORE, 3998064640, 3998081023,
+STORE, 3998081024, 3998085119,
+STORE, 3998085120, 3998130175,
+STORE, 3998130176, 3998134271,
+STORE, 3998134272, 3998142463,
+STORE, 3998142464, 3998179327,
+STORE, 3998179328, 3998212095,
+STORE, 3998212096, 3998326783,
+STORE, 3998326784, 3998351359,
+STORE, 3998351360, 3998392319,
+STORE, 3998392320, 3998396415,
+STORE, 3998396416, 3998400511,
+STORE, 3998400512, 3998433279,
+STORE, 3998433280, 3998466047,
+STORE, 3998466048, 3998613503,
+STORE, 3998613504, 3998666751,
+STORE, 3998666752, 3998724095,
+STORE, 3998724096, 3998732287,
+STORE, 3998732288, 3998736383,
+STORE, 3998736384, 3998760959,
+STORE, 3998760960, 3998777343,
+STORE, 3998777344, 3998822399,
+STORE, 3998822400, 3998826495,
+STORE, 3998826496, 3998830591,
+STORE, 3998830592, 3998863359,
+STORE, 3998863360, 3998900223,
+STORE, 3998900224, 3999043583,
+STORE, 3999043584, 3999121407,
+STORE, 3999121408, 3999215615,
+STORE, 3999215616, 3999223807,
+STORE, 3999223808, 3999227903,
+STORE, 3999227904, 3999236095,
+STORE, 3999236096, 3999268863,
+STORE, 3999268864, 3999301631,
+STORE, 3999301632, 3999354879,
+STORE, 3999354880, 3999428607,
+STORE, 3999428608, 3999436799,
+STORE, 3999436800, 3999440895,
+STORE, 3999444992, 3999461375,
+STORE, 3999461376, 3999584255,
+STORE, 3999584256, 3999760383,
+STORE, 3999760384, 4000219135,
+STORE, 4000219136, 4000235519,
+STORE, 4000235520, 4000251903,
+STORE, 4000251904, 4000501759,
+STORE, 4000501760, 4000505855,
+STORE, 4000505856, 4000509951,
+STORE, 4000509952, 4000518143,
+STORE, 4000518144, 4000522239,
+STORE, 4000522240, 4000587775,
+STORE, 4000587776, 4000645119,
+STORE, 4000645120, 4000813055,
+STORE, 4000813056, 4000817151,
+STORE, 4000821248, 4000837631,
+STORE, 4000837632, 4000870399,
+STORE, 4000870400, 4000874495,
+STORE, 4000874496, 4000878591,
+STORE, 4000878592, 4000882687,
+STORE, 4000882688, 4000886783,
+STORE, 4000886784, 4000890879,
+STORE, 4000890880, 4000907263,
+STORE, 4000907264, 4001214463,
+STORE, 4001214464, 4001558527,
+STORE, 4001558528, 4002484223,
+STORE, 4002484224, 4002525183,
+STORE, 4002525184, 4002529279,
+STORE, 4002529280, 4002533375,
+STORE, 4002533376, 4002537471,
+STORE, 4002537472, 4002660351,
+STORE, 4002660352, 4002779135,
+STORE, 4002779136, 4002791423,
+STORE, 4002791424, 4002799615,
+STORE, 4002799616, 4002807807,
+STORE, 4002807808, 4002811903,
+STORE, 4002811904, 4002828287,
+STORE, 4002828288, 4002910207,
+STORE, 4002910208, 4003028991,
+STORE, 4003028992, 4003037183,
+STORE, 4003037184, 4003045375,
+STORE, 4003045376, 4003049471,
+STORE, 4003049472, 4003053567,
+STORE, 4003053568, 4003057663,
+STORE, 4003057664, 4003065855,
+STORE, 4003065856, 4003135487,
+STORE, 4003135488, 4003446783,
+STORE, 4003446784, 4003450879,
+STORE, 4003450880, 4003454975,
+STORE, 4003454976, 4003459071,
+STORE, 4003459072, 4003463167,
+STORE, 4003463168, 4003495935,
+STORE, 4003495936, 4003569663,
+STORE, 4003569664, 4003573759,
+STORE, 4003573760, 4003704831,
+STORE, 4003704832, 4003708927,
+STORE, 4003708928, 4003713023,
+STORE, 4003713024, 4003737599,
+STORE, 4003737600, 4003770367,
+STORE, 4003770368, 4003876863,
+STORE, 4003876864, 4003880959,
+STORE, 4003880960, 4003885055,
+STORE, 4003885056, 4003889151,
+STORE, 4003889152, 4003893247,
+STORE, 4003893248, 4003897343,
+STORE, 4003897344, 4003962879,
+STORE, 4003962880, 4004069375,
+STORE, 4004069376, 4004093951,
+STORE, 4004093952, 4004118527,
+STORE, 4004118528, 4004122623,
+STORE, 4004122624, 4004126719,
+STORE, 4004126720, 4004155391,
+STORE, 4004155392, 4004286463,
+STORE, 4004286464, 4004384767,
+STORE, 4004384768, 4004388863,
+STORE, 4004388864, 4004646911,
+STORE, 4004646912, 4004655103,
+STORE, 4004655104, 4004659199,
+STORE, 4004659200, 4004667391,
+STORE, 4004667392, 4004683775,
+STORE, 4004683776, 4004814847,
+STORE, 4004814848, 4004818943,
+STORE, 4004818944, 4004823039,
+STORE, 4004823040, 4004827135,
+STORE, 4004827136, 4004835327,
+STORE, 4004835328, 4004954111,
+STORE, 4004954112, 4005085183,
+STORE, 4005085184, 4005306367,
+STORE, 4005306368, 4005765119,
+STORE, 4005765120, 4005789695,
+STORE, 4005789696, 4005793791,
+STORE, 4005793792, 4005801983,
+STORE, 4005801984, 4005920767,
+STORE, 4005920768, 4005945343,
+STORE, 4005945344, 4005949439,
+STORE, 4005949440, 4005986303,
+STORE, 4005986304, 4005990399,
+STORE, 4005990400, 4005994495,
+STORE, 4005994496, 4006002687,
+STORE, 4006002688, 4006109183,
+STORE, 4006109184, 4006117375,
+STORE, 4006117376, 4006121471,
+STORE, 4006121472, 4006133759,
+STORE, 4006133760, 4006137855,
+STORE, 4006137856, 4006141951,
+STORE, 4006141952, 4006150143,
+STORE, 4006150144, 4006391807,
+STORE, 4006391808, 4006445055,
+STORE, 4006445056, 4006563839,
+STORE, 4006563840, 4006572031,
+STORE, 4006572032, 4006576127,
+STORE, 4006576128, 4006584319,
+STORE, 4006584320, 4006694911,
+STORE, 4006694912, 4006739967,
+STORE, 4006739968, 4006776831,
+STORE, 4006776832, 4006785023,
+STORE, 4006785024, 4006789119,
+STORE, 4006789120, 4006797311,
+STORE, 4006797312, 4006813695,
+STORE, 4006813696, 4006846463,
+STORE, 4006846464, 4006977535,
+STORE, 4006977536, 4007006207,
+STORE, 4007006208, 4007010303,
+STORE, 4007010304, 4007067647,
+STORE, 4007067648, 4007075839,
+STORE, 4007075840, 4007084031,
+STORE, 4007084032, 4007100415,
+STORE, 4007100416, 4007116799,
+STORE, 4007116800, 4007133183,
+STORE, 4007133184, 4007153663,
+STORE, 4007153664, 4007178239,
+STORE, 4007178240, 4007202815,
+STORE, 4007202816, 4007206911,
+STORE, 4007206912, 4007272447,
+STORE, 4007272448, 4007276543,
+STORE, 4007276544, 4007280639,
+STORE, 4007280640, 4007284735,
+STORE, 4007284736, 4007292927,
+STORE, 4007292928, 4007423999,
+STORE, 4007424000, 4007448575,
+STORE, 4007448576, 4007452671,
+STORE, 4007452672, 4007505919,
+STORE, 4007505920, 4007510015,
+STORE, 4007510016, 4007514111,
+STORE, 4007514112, 4007645183,
+STORE, 4007645184, 4007776255,
+STORE, 4007776256, 4007780351,
+STORE, 4007780352, 4007784447,
+STORE, 4007784448, 4007788543,
+STORE, 4007788544, 4007809023,
+STORE, 4007809024, 4007829503,
+STORE, 4007829504, 4007960575,
+STORE, 4007960576, 4008091647,
+STORE, 4008091648, 4008296447,
+STORE, 4008296448, 4008890367,
+STORE, 4008890368, 4008898559,
+STORE, 4008898560, 4008902655,
+STORE, 4008902656, 4008996863,
+STORE, 4008996864, 4009041919,
+STORE, 4009041920, 4009082879,
+STORE, 4009082880, 4009091071,
+STORE, 4009091072, 4009107455,
+STORE, 4009107456, 4009349119,
+STORE, 4009349120, 4009373695,
+STORE, 4009373696, 4009414655,
+STORE, 4009414656, 4009422847,
+STORE, 4009422848, 4009426943,
+STORE, 4009426944, 4009447423,
+STORE, 4009447424, 4009471999,
+STORE, 4009472000, 4009512959,
+STORE, 4009512960, 4009594879,
+STORE, 4009594880, 4009598975,
+STORE, 4009598976, 4009697279,
+STORE, 4009697280, 4009713663,
+STORE, 4009713664, 4009717759,
+STORE, 4009717760, 4009721855,
+STORE, 4009721856, 4009730047,
+STORE, 4009730048, 4009861119,
+STORE, 4009861120, 4009951231,
+STORE, 4009951232, 4010131455,
+STORE, 4010131456, 4010135551,
+STORE, 4010135552, 4010139647,
+STORE, 4010139648, 4010143743,
+STORE, 4010143744, 4010164223,
+STORE, 4010164224, 4010295295,
+STORE, 4010295296, 4010299391,
+STORE, 4010299392, 4010491903,
+STORE, 4010491904, 4010495999,
+STORE, 4010496000, 4010668031,
+STORE, 4010668032, 4011028479,
+STORE, 4011028480, 4011053055,
+STORE, 4011053056, 4011057151,
+STORE, 4011057152, 4011118591,
+STORE, 4011118592, 4011126783,
+STORE, 4011126784, 4011130879,
+STORE, 4011130880, 4011143167,
+STORE, 4011143168, 4011147263,
+STORE, 4011147264, 4011167743,
+STORE, 4011167744, 4011171839,
+STORE, 4011171840, 4011360255,
+STORE, 4011360256, 4011364351,
+STORE, 4011364352, 4011626495,
+STORE, 4011626496, 4012216319,
+STORE, 4012216320, 4012228607,
+STORE, 4012228608, 4012232703,
+STORE, 4012232704, 4012236799,
+STORE, 4012236800, 4012240895,
+STORE, 4012240896, 4012261375,
+STORE, 4012261376, 4012392447,
+STORE, 4012392448, 4012466175,
+STORE, 4012466176, 4012597247,
+STORE, 4012597248, 4012601343,
+STORE, 4012601344, 4012605439,
+STORE, 4012605440, 4012609535,
+STORE, 4012609536, 4012679167,
+STORE, 4012679168, 4013563903,
+STORE, 4013563904, 4015366143,
+STORE, 4015366144, 4015411199,
+STORE, 4015411200, 4015415295,
+STORE, 4015415296, 4015419391,
+STORE, 4015419392, 4015542271,
+STORE, 4015542272, 4015550463,
+STORE, 4015550464, 4015558655,
+STORE, 4015558656, 4015562751,
+STORE, 4015562752, 4015583231,
+STORE, 4015583232, 4015587327,
+STORE, 4015587328, 4015603711,
+STORE, 4015665152, 4015669247,
+STORE, 4015669248, 4015812607,
+STORE, 4015812608, 4015816703,
+STORE, 4015816704, 4016111615,
+STORE, 4016111616, 4016467967,
+STORE, 4016467968, 4016508927,
+STORE, 4016508928, 4016517119,
+STORE, 4016517120, 4016525311,
+STORE, 4016525312, 4016586751,
+STORE, 4016586752, 4016664575,
+STORE, 4016664576, 4016697343,
+STORE, 4016697344, 4016742399,
+STORE, 4016742400, 4016746495,
+STORE, 4016746496, 4016750591,
+STORE, 4016750592, 4016758783,
+STORE, 4016799744, 4016844799,
+STORE, 4016844800, 4016902143,
+STORE, 4016902144, 4016992255,
+STORE, 4016992256, 4017000447,
+STORE, 4017000448, 4017004543,
+STORE, 4017004544, 4017008639,
+STORE, 4017008640, 4017016831,
+STORE, 4017016832, 4017020927,
+STORE, 4017020928, 4017127423,
+STORE, 4017127424, 4017131519,
+STORE, 4017131520, 4017229823,
+STORE, 4017229824, 4017422335,
+STORE, 4017422336, 4017438719,
+STORE, 4017438720, 4017442815,
+STORE, 4017442816, 4017446911,
+STORE, 4017446912, 4017455103,
+STORE, 4017455104, 4017766399,
+STORE, 4017766400, 4017909759,
+STORE, 4017909760, 4018081791,
+STORE, 4018081792, 4018089983,
+STORE, 4018089984, 4018094079,
+STORE, 4018094080, 4018098175,
+STORE, 4018098176, 4018327551,
+STORE, 4018327552, 4018331647,
+STORE, 4018331648, 4018339839,
+STORE, 4018339840, 4018348031,
+STORE, 4018348032, 4018610175,
+STORE, 4018610176, 4018626559,
+STORE, 4018626560, 4018647039,
+STORE, 4018647040, 4018651135,
+STORE, 4018651136, 4018749439,
+STORE, 4018749440, 4018761727,
+STORE, 4018761728, 4018802687,
+STORE, 4018802688, 4018806783,
+STORE, 4018806784, 4018810879,
+STORE, 4018810880, 4018814975,
+STORE, 4018814976, 4018823167,
+STORE, 4018823168, 4018954239,
+STORE, 4018954240, 4019007487,
+STORE, 4019007488, 4019068927,
+STORE, 4019068928, 4019077119,
+STORE, 4019077120, 4019081215,
+STORE, 4019081216, 4019093503,
+STORE, 4019093504, 4019208191,
+STORE, 4019208192, 4019232767,
+STORE, 4019232768, 4019265535,
+STORE, 4019265536, 4019269631,
+STORE, 4019269632, 4019277823,
+STORE, 4019277824, 4019458047,
+STORE, 4019458048, 4019519487,
+STORE, 4019519488, 4019613695,
+STORE, 4019613696, 4019621887,
+STORE, 4019621888, 4019625983,
+STORE, 4019625984, 4019630079,
+STORE, 4019630080, 4019744767,
+STORE, 4019744768, 4019822591,
+STORE, 4019822592, 4019929087,
+STORE, 4019929088, 4019941375,
+STORE, 4019941376, 4019945471,
+STORE, 4019945472, 4019961855,
+STORE, 4019961856, 4019994623,
+STORE, 4019994624, 4019998719,
+STORE, 4019998720, 4020002815,
+STORE, 4020002816, 4020006911,
+STORE, 4020006912, 4020011007,
+STORE, 4020011008, 4020256767,
+STORE, 4020256768, 4020326399,
+STORE, 4020326400, 4020457471,
+STORE, 4020457472, 4020469759,
+STORE, 4020469760, 4020473855,
+STORE, 4020473856, 4020482047,
+STORE, 4020482048, 4020711423,
+STORE, 4020711424, 4020715519,
+STORE, 4020715520, 4020719615,
+STORE, 4020719616, 4020723711,
+STORE, 4020723712, 4020805631,
+STORE, 4020805632, 4021051391,
+STORE, 4021051392, 4021460991,
+STORE, 4021460992, 4021469183,
+STORE, 4021469184, 4021473279,
+STORE, 4021473280, 4021571583,
+STORE, 4021571584, 4021633023,
+STORE, 4021633024, 4021727231,
+STORE, 4021727232, 4021735423,
+STORE, 4021735424, 4021739519,
+STORE, 4021739520, 4021747711,
+STORE, 4021747712, 4021829631,
+STORE, 4021829632, 4021866495,
+STORE, 4021866496, 4021919743,
+STORE, 4021919744, 4021927935,
+STORE, 4021927936, 4021932031,
+STORE, 4021932032, 4021944319,
+STORE, 4021944320, 4022157311,
+STORE, 4022157312, 4022161407,
+STORE, 4022161408, 4022173695,
+STORE, 4022173696, 4022177791,
+STORE, 4022177792, 4022472703,
+STORE, 4022472704, 4022509567,
+STORE, 4022509568, 4022583295,
+STORE, 4022583296, 4022587391,
+STORE, 4022587392, 4022591487,
+STORE, 4022591488, 4022607871,
+STORE, 4022607872, 4022657023,
+STORE, 4022657024, 4022722559,
+STORE, 4022722560, 4022730751,
+STORE, 4022730752, 4022734847,
+STORE, 4022734848, 4022865919,
+STORE, 4022865920, 4022943743,
+STORE, 4022943744, 4023062527,
+STORE, 4023062528, 4023074815,
+STORE, 4023074816, 4023078911,
+STORE, 4023078912, 4023128063,
+STORE, 4023128064, 4023218175,
+STORE, 4023218176, 4023361535,
+STORE, 4023361536, 4023373823,
+STORE, 4023373824, 4023377919,
+STORE, 4023377920, 4023558143,
+STORE, 4023558144, 4023631871,
+STORE, 4023631872, 4023816191,
+STORE, 4023816192, 4023820287,
+STORE, 4023820288, 4023824383,
+STORE, 4023824384, 4023832575,
+STORE, 4023832576, 4024078335,
+STORE, 4024078336, 4024197119,
+STORE, 4024197120, 4024389631,
+STORE, 4024389632, 4024406015,
+STORE, 4024406016, 4024410111,
+STORE, 4024410112, 4024422399,
+STORE, 4024422400, 4024619007,
+STORE, 4024619008, 4024639487,
+STORE, 4024639488, 4024655871,
+STORE, 4024655872, 4024664063,
+STORE, 4024664064, 4024668159,
+STORE, 4024668160, 4024676351,
+STORE, 4024676352, 4024905727,
+STORE, 4024905728, 4024909823,
+STORE, 4024909824, 4024918015,
+STORE, 4024918016, 4024922111,
+STORE, 4024922112, 4024930303,
+STORE, 4024930304, 4025110527,
+STORE, 4025110528, 4025176063,
+STORE, 4025176064, 4025208831,
+STORE, 4025208832, 4025212927,
+STORE, 4025212928, 4025217023,
+STORE, 4025217024, 4025348095,
+STORE, 4025348096, 4025372671,
+STORE, 4025372672, 4025458687,
+STORE, 4025458688, 4025466879,
+STORE, 4025466880, 4025565183,
+STORE, 4025565184, 4025757695,
+STORE, 4025757696, 4026249215,
+STORE, 4026249216, 4026261503,
+STORE, 4026261504, 4026265599,
+STORE, 4026265600, 4026269695,
+STORE, 4026269696, 4026302463,
+STORE, 4026302464, 4026306559,
+STORE, 4026306560, 4026314751,
+STORE, 4026314752, 4026318847,
+STORE, 4026318848, 4026322943,
+STORE, 4026322944, 4026327039,
+STORE, 4026327040, 4026654719,
+STORE, 4026654720, 4026671103,
+STORE, 4026671104, 4026720255,
+STORE, 4026720256, 4026724351,
+STORE, 4026724352, 4026728447,
+STORE, 4026728448, 4026732543,
+STORE, 4026732544, 4026863615,
+STORE, 4026863616, 4027027455,
+STORE, 4027027456, 4027031551,
+STORE, 4027031552, 4027514879,
+STORE, 4027514880, 4027531263,
+STORE, 4027531264, 4027535359,
+STORE, 4027535360, 4027539455,
+STORE, 4027539456, 4027785215,
+STORE, 4027785216, 4027789311,
+STORE, 4027789312, 4027793407,
+STORE, 4027793408, 4027797503,
+STORE, 4027797504, 4027863039,
+STORE, 4027863040, 4027899903,
+STORE, 4027899904, 4027949055,
+STORE, 4027949056, 4027957247,
+STORE, 4027957248, 4027961343,
+STORE, 4027961344, 4027965439,
+STORE, 4027965440, 4028194815,
+STORE, 4028194816, 4028252159,
+STORE, 4028252160, 4028338175,
+STORE, 4028338176, 4028350463,
+STORE, 4028350464, 4028354559,
+STORE, 4028354560, 4028452863,
+STORE, 4028452864, 4028489727,
+STORE, 4028489728, 4028530687,
+STORE, 4028530688, 4028538879,
+STORE, 4028538880, 4028542975,
+STORE, 4028542976, 4028551167,
+STORE, 4028551168, 4028665855,
+STORE, 4028665856, 4029349887,
+STORE, 4029349888, 4030468095,
+STORE, 4030468096, 4030513151,
+STORE, 4030513152, 4030517247,
+STORE, 4030517248, 4030525439,
+STORE, 4030525440, 4030529535,
+STORE, 4030529536, 4030758911,
+STORE, 4030758912, 4030828543,
+STORE, 4030828544, 4030943231,
+STORE, 4030943232, 4030951423,
+STORE, 4030951424, 4030955519,
+STORE, 4030955520, 4030967807,
+STORE, 4030967808, 4031131647,
+STORE, 4031131648, 4031135743,
+STORE, 4031135744, 4031139839,
+STORE, 4031139840, 4031148031,
+STORE, 4031148032, 4031152127,
+STORE, 4031152128, 4031160319,
+STORE, 4031160320, 4031504383,
+STORE, 4031504384, 4031598591,
+STORE, 4031598592, 4031754239,
+STORE, 4031754240, 4031766527,
+STORE, 4031766528, 4031770623,
+STORE, 4031770624, 4031774719,
+STORE, 4031774720, 4031782911,
+STORE, 4031782912, 4031799295,
+STORE, 4031799296, 4031856639,
+STORE, 4031856640, 4031983615,
+STORE, 4031983616, 4031987711,
+STORE, 4031987712, 4031991807,
+STORE, 4031991808, 4032270335,
+STORE, 4032270336, 4032274431,
+STORE, 4032274432, 4032282623,
+STORE, 4032282624, 4032286719,
+STORE, 4032286720, 4032290815,
+STORE, 4032290816, 4032389119,
+STORE, 4032389120, 4032397311,
+STORE, 4032397312, 4032405503,
+STORE, 4032405504, 4032413695,
+STORE, 4032413696, 4032417791,
+STORE, 4032417792, 4032565247,
+STORE, 4032565248, 4032593919,
+STORE, 4032593920, 4032737279,
+STORE, 4032737280, 4032741375,
+STORE, 4032741376, 4032745471,
+STORE, 4032745472, 4032770047,
+STORE, 4032770048, 4032933887,
+STORE, 4032933888, 4032999423,
+STORE, 4032999424, 4033032191,
+STORE, 4033032192, 4033036287,
+STORE, 4033036288, 4033040383,
+STORE, 4033040384, 4033105919,
+STORE, 4033105920, 4033396735,
+STORE, 4033396736, 4033822719,
+STORE, 4033822720, 4033839103,
+STORE, 4033839104, 4033843199,
+STORE, 4033843200, 4033851391,
+STORE, 4033851392, 4033863679,
+STORE, 4033863680, 4033880063,
+STORE, 4033880064, 4033933311,
+STORE, 4033933312, 4034023423,
+STORE, 4034023424, 4034031615,
+STORE, 4034031616, 4034035711,
+STORE, 4034035712, 4034043903,
+STORE, 4034043904, 4034142207,
+STORE, 4034142208, 4034191359,
+STORE, 4034191360, 4034260991,
+STORE, 4034260992, 4034269183,
+STORE, 4034269184, 4034273279,
+STORE, 4034273280, 4034281471,
+STORE, 4034281472, 4034412543,
+STORE, 4034412544, 4034445311,
+STORE, 4034445312, 4034490367,
+STORE, 4034490368, 4034494463,
+STORE, 4034494464, 4034498559,
+STORE, 4034498560, 4034662399,
+STORE, 4034662400, 4034666495,
+STORE, 4034666496, 4034670591,
+STORE, 4034670592, 4034674687,
+STORE, 4034674688, 4034678783,
+STORE, 4034678784, 4034682879,
+STORE, 4034682880, 4034781183,
+STORE, 4034781184, 4035043327,
+STORE, 4035043328, 4035047423,
+STORE, 4035047424, 4035055615,
+STORE, 4035055616, 4035059711,
+STORE, 4035059712, 4035063807,
+STORE, 4035063808, 4035067903,
+STORE, 4035067904, 4035100671,
+STORE, 4035100672, 4035375103,
+STORE, 4035375104, 4035383295,
+STORE, 4035383296, 4035395583,
+STORE, 4035395584, 4035399679,
+STORE, 4035399680, 4035403775,
+STORE, 4035403776, 4035407871,
+STORE, 4035407872, 4035411967,
+STORE, 4035411968, 4035477503,
+STORE, 4035477504, 4035608575,
+STORE, 4035608576, 4035641343,
+STORE, 4035641344, 4035682303,
+STORE, 4035682304, 4035686399,
+STORE, 4035686400, 4035690495,
+STORE, 4035690496, 4035694591,
+STORE, 4035694592, 4035743743,
+STORE, 4035743744, 4035784703,
+STORE, 4035784704, 4035829759,
+STORE, 4035829760, 4035837951,
+STORE, 4035837952, 4035842047,
+STORE, 4035842048, 4035846143,
+STORE, 4035846144, 4035850239,
+STORE, 4035850240, 4036001791,
+STORE, 4036001792, 4036005887,
+STORE, 4036005888, 4036214783,
+STORE, 4036214784, 4036218879,
+STORE, 4036218880, 4036603903,
+STORE, 4036603904, 4036648959,
+STORE, 4036648960, 4036653055,
+STORE, 4036653056, 4036657151,
+STORE, 4036657152, 4036665343,
+STORE, 4036665344, 4036780031,
+STORE, 4036780032, 4036829183,
+STORE, 4036829184, 4036984831,
+STORE, 4036984832, 4036993023,
+STORE, 4036993024, 4036997119,
+STORE, 4036997120, 4037001215,
+STORE, 4037001216, 4037009407,
+STORE, 4037009408, 4037025791,
+STORE, 4037025792, 4037095423,
+STORE, 4037095424, 4037181439,
+STORE, 4037181440, 4037193727,
+STORE, 4037193728, 4037197823,
+STORE, 4037197824, 4037206015,
+STORE, 4037206016, 4037320703,
+STORE, 4037320704, 4037337087,
+STORE, 4037337088, 4037349375,
+STORE, 4037349376, 4037357567,
+STORE, 4037357568, 4037361663,
+STORE, 4037369856, 4037386239,
+STORE, 4037386240, 4037672959,
+STORE, 4037672960, 4037689343,
+STORE, 4037689344, 4037730303,
+STORE, 4037730304, 4037734399,
+STORE, 4037734400, 4037738495,
+STORE, 4037738496, 4037742591,
+STORE, 4037742592, 4037758975,
+STORE, 4037758976, 4037890047,
+STORE, 4037890048, 4037931007,
+STORE, 4037931008, 4037976063,
+STORE, 4037976064, 4037984255,
+STORE, 4037984256, 4037988351,
+STORE, 4037988352, 4038053887,
+STORE, 4038053888, 4038184959,
+STORE, 4038184960, 4038189055,
+STORE, 4038189056, 4038197247,
+STORE, 4038197248, 4038201343,
+STORE, 4038201344, 4038205439,
+STORE, 4038205440, 4038209535,
+STORE, 4038217728, 4038250495,
+STORE, 4038250496, 4038512639,
+STORE, 4038512640, 4038516735,
+STORE, 4038516736, 4038520831,
+STORE, 4038520832, 4038524927,
+STORE, 4038524928, 4038529023,
+STORE, 4038529024, 4038533119,
+STORE, 4038541312, 4038623231,
+STORE, 4038623232, 4038754303,
+STORE, 4038754304, 4038885375,
+STORE, 4038885376, 4038889471,
+STORE, 4038897664, 4038963199,
+STORE, 4038963200, 4038967295,
+STORE, 4038967296, 4038983679,
+STORE, 4038983680, 4039114751,
+STORE, 4039114752, 4039245823,
+STORE, 4039245824, 4039376895,
+STORE, 4039376896, 4040687615,
+STORE, 4040687616, 4040691711,
+STORE, 4040691712, 4040806399,
+STORE, 4040806400, 4040937471,
+STORE, 4040937472, 4040941567,
+STORE, 4040945664, 4040949759,
+STORE, 4040949760, 4041080831,
+STORE, 4041080832, 4041211903,
+STORE, 4041211904, 4043046911,
+STORE, 4043046912, 4043051007,
+STORE, 4043051008, 4043055103,
+STORE, 4043055104, 4043137023,
+STORE, 4043137024, 4043141119,
+STORE, 4043141120, 4043145215,
+STORE, 4043145216, 4043153407,
+STORE, 4043153408, 4043186175,
+STORE, 4043186176, 4043317247,
+STORE, 4043317248, 4043448319,
+STORE, 4043448320, 4043579391,
+STORE, 4043579392, 4043583487,
+STORE, 4043583488, 4043599871,
+STORE, 4043599872, 4043661311,
+STORE, 4043661312, 4043792383,
+STORE, 4043792384, 4043796479,
+STORE, 4043796480, 4043800575,
+STORE, 4043800576, 4043816959,
+STORE, 4043816960, 4043821055,
+STORE, 4043821056, 4043825151,
+STORE, 4043825152, 4043829247,
+STORE, 4043829248, 4043833343,
+STORE, 4043833344, 4047241215,
+STORE, 4047241216, 4047249407,
+STORE, 4047249408, 4047253503,
+STORE, 4047253504, 4047323135,
+STORE, 4047323136, 4047327231,
+STORE, 4047327232, 4047458303,
+STORE, 4047458304, 4047589375,
+STORE, 4047589376, 4047720447,
+STORE, 4047720448, 4047773695,
+STORE, 4047773696, 4047790079,
+STORE, 4047790080, 4047921151,
+STORE, 4047921152, 4048052223,
+STORE, 4048052224, 4048183295,
+STORE, 4048183296, 4049002495,
+STORE, 4049002496, 4049133567,
+STORE, 4049133568, 4049154047,
+STORE, 4049154048, 4049158143,
+STORE, 4049158144, 4049162239,
+STORE, 4049162240, 4049166335,
+STORE, 4049166336, 4049174527,
+STORE, 4049174528, 4049182719,
+STORE, 4049182720, 4049186815,
+STORE, 4049186816, 4049190911,
+STORE, 4049190912, 4049195007,
+STORE, 4049195008, 4049203199,
+STORE, 4049203200, 4049207295,
+STORE, 4049207296, 4049211391,
+STORE, 4049211392, 4049215487,
+STORE, 4049215488, 4049219583,
+STORE, 4049219584, 4049227775,
+STORE, 4049227776, 4049231871,
+STORE, 4049231872, 4049235967,
+STORE, 4049235968, 4049244159,
+STORE, 4049244160, 4049248255,
+STORE, 4049248256, 4049252351,
+STORE, 4049252352, 4049256447,
+STORE, 4049256448, 4049268735,
+STORE, 4049268736, 4049272831,
+STORE, 4049272832, 4049313791,
+STORE, 4049313792, 4049723391,
+STORE, 4049723392, 4049727487,
+STORE, 4049727488, 4049858559,
+STORE, 4049858560, 4049989631,
+STORE, 4049989632, 4049993727,
+STORE, 4049993728, 4050026495,
+STORE, 4050026496, 4050030591,
+STORE, 4050030592, 4050161663,
+STORE, 4050161664, 4050169855,
+STORE, 4050169856, 4050223103,
+STORE, 4050223104, 4050632703,
+STORE, 4050632704, 4050636799,
+STORE, 4050636800, 4050640895,
+STORE, 4050640896, 4050644991,
+STORE, 4050644992, 4050661375,
+STORE, 4050661376, 4050665471,
+STORE, 4050665472, 4050673663,
+STORE, 4050673664, 4050677759,
+STORE, 4050677760, 4050694143,
+STORE, 4050694144, 4050702335,
+STORE, 4050702336, 4050956287,
+STORE, 4050956288, 4051963903,
+STORE, 4051963904, 4051980287,
+STORE, 4051980288, 4051988479,
+STORE, 4051988480, 4052000767,
+STORE, 4052000768, 4052004863,
+STORE, 4052004864, 4052029439,
+STORE, 4284014592, 4284018687,
+STORE, 4284018688, 4292403199,
+SNULL, 4041080832, 4041211903,
+SNULL, 3795763200, 3795894271,
+STORE, 3629522944, 3696631807,
+SNULL, 3663077375, 3696631807,
+STORE, 3629522944, 3663077375,
+STORE, 3663077376, 3696631807,
+SNULL, 3663077376, 3696631807,
+STORE, 3663077376, 3696631807,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3626471424, 3627524095,
+SNULL, 3626471424, 3626475519,
+STORE, 3626475520, 3627524095,
+STORE, 3626471424, 3626475519,
+SNULL, 3627519999, 3627524095,
+STORE, 3626475520, 3627519999,
+STORE, 3627520000, 3627524095,
+STORE, 3625418752, 3626475519,
+SNULL, 3625418752, 3625422847,
+STORE, 3625422848, 3626475519,
+STORE, 3625418752, 3625422847,
+SNULL, 3626467327, 3626475519,
+STORE, 3625422848, 3626467327,
+STORE, 3626467328, 3626475519,
+STORE, 3624366080, 3625422847,
+SNULL, 3624366080, 3624370175,
+STORE, 3624370176, 3625422847,
+STORE, 3624366080, 3624370175,
+SNULL, 3625414655, 3625422847,
+STORE, 3624370176, 3625414655,
+STORE, 3625414656, 3625422847,
+STORE, 4041191424, 4041211903,
+SNULL, 4041195519, 4041211903,
+STORE, 4041191424, 4041195519,
+STORE, 4041195520, 4041211903,
+STORE, 4041170944, 4041191423,
+SNULL, 4041175039, 4041191423,
+STORE, 4041170944, 4041175039,
+STORE, 4041175040, 4041191423,
+SNULL, 3625426943, 3626467327,
+STORE, 3625422848, 3625426943,
+STORE, 3625426944, 3626467327,
+STORE, 4041162752, 4041170943,
+SNULL, 3626479615, 3627519999,
+STORE, 3626475520, 3626479615,
+STORE, 3626479616, 3627519999,
+STORE, 4041154560, 4041162751,
+STORE, 4041154560, 4041170943,
+STORE, 4041134080, 4041154559,
+SNULL, 4041138175, 4041154559,
+STORE, 4041134080, 4041138175,
+STORE, 4041138176, 4041154559,
+SNULL, 3624374271, 3625414655,
+STORE, 3624370176, 3624374271,
+STORE, 3624374272, 3625414655,
+STORE, 4041125888, 4041134079,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+STORE, 3487174656, 3487584255,
+STORE, 4041121792, 4041125887,
+SNULL, 4041121792, 4041125887,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 3487174656, 3487584255,
+STORE, 3222274048, 3223326719,
+SNULL, 3222274048, 3222278143,
+STORE, 3222278144, 3223326719,
+STORE, 3222274048, 3222278143,
+SNULL, 3223322623, 3223326719,
+STORE, 3222278144, 3223322623,
+STORE, 3223322624, 3223326719,
+STORE, 3221221376, 3222278143,
+SNULL, 3221221376, 3221225471,
+STORE, 3221225472, 3222278143,
+STORE, 3221221376, 3221225471,
+SNULL, 3222269951, 3222278143,
+STORE, 3221225472, 3222269951,
+STORE, 3222269952, 3222278143,
+STORE, 3220168704, 3221225471,
+SNULL, 3220168704, 3220172799,
+STORE, 3220172800, 3221225471,
+STORE, 3220168704, 3220172799,
+SNULL, 3221217279, 3221225471,
+STORE, 3220172800, 3221217279,
+STORE, 3221217280, 3221225471,
+STORE, 4041117696, 4041125887,
+STORE, 4041117696, 4041134079,
+STORE, 3219083264, 3220172799,
+SNULL, 3219083264, 3219087359,
+STORE, 3219087360, 3220172799,
+STORE, 3219083264, 3219087359,
+SNULL, 3220164607, 3220172799,
+STORE, 3219087360, 3220164607,
+STORE, 3220164608, 3220172799,
+STORE, 4041109504, 4041117695,
+STORE, 4041109504, 4041134079,
+STORE, 3217997824, 3219087359,
+SNULL, 3217997824, 3218001919,
+STORE, 3218001920, 3219087359,
+STORE, 3217997824, 3218001919,
+SNULL, 3219079167, 3219087359,
+STORE, 3218001920, 3219079167,
+STORE, 3219079168, 3219087359,
+STORE, 4041101312, 4041109503,
+STORE, 4041101312, 4041134079,
+STORE, 3216912384, 3218001919,
+SNULL, 3216912384, 3216916479,
+STORE, 3216916480, 3218001919,
+STORE, 3216912384, 3216916479,
+SNULL, 3217993727, 3218001919,
+STORE, 3216916480, 3217993727,
+STORE, 3217993728, 3218001919,
+STORE, 4041093120, 4041101311,
+STORE, 4041093120, 4041134079,
+STORE, 3215826944, 3216916479,
+SNULL, 3215826944, 3215831039,
+STORE, 3215831040, 3216916479,
+STORE, 3215826944, 3215831039,
+SNULL, 3216908287, 3216916479,
+STORE, 3215831040, 3216908287,
+STORE, 3216908288, 3216916479,
+STORE, 4016779264, 4016799743,
+SNULL, 4016783359, 4016799743,
+STORE, 4016779264, 4016783359,
+STORE, 4016783360, 4016799743,
+STORE, 4016758784, 4016779263,
+SNULL, 4016762879, 4016779263,
+STORE, 4016758784, 4016762879,
+STORE, 4016762880, 4016779263,
+SNULL, 3222282239, 3223322623,
+STORE, 3222278144, 3222282239,
+STORE, 3222282240, 3223322623,
+STORE, 4041084928, 4041093119,
+STORE, 4041084928, 4041134079,
+SNULL, 3221229567, 3222269951,
+STORE, 3221225472, 3221229567,
+STORE, 3221229568, 3222269951,
+STORE, 4015644672, 4015665151,
+STORE, 4038889472, 4038897663,
+SNULL, 4015648767, 4015665151,
+STORE, 4015644672, 4015648767,
+STORE, 4015648768, 4015665151,
+STORE, 4015624192, 4015644671,
+SNULL, 4015628287, 4015644671,
+STORE, 4015624192, 4015628287,
+STORE, 4015628288, 4015644671,
+SNULL, 3219091455, 3220164607,
+STORE, 3219087360, 3219091455,
+STORE, 3219091456, 3220164607,
+STORE, 4015603712, 4015624191,
+SNULL, 4015607807, 4015624191,
+STORE, 4015603712, 4015607807,
+STORE, 4015607808, 4015624191,
+SNULL, 3218006015, 3219079167,
+STORE, 3218001920, 3218006015,
+STORE, 3218006016, 3219079167,
+STORE, 3949674496, 3949694975,
+SNULL, 3949678591, 3949694975,
+STORE, 3949674496, 3949678591,
+STORE, 3949678592, 3949694975,
+SNULL, 3216920575, 3217993727,
+STORE, 3216916480, 3216920575,
+STORE, 3216920576, 3217993727,
+STORE, 3948924928, 3948945407,
+SNULL, 3948929023, 3948945407,
+STORE, 3948924928, 3948929023,
+STORE, 3948929024, 3948945407,
+SNULL, 3215835135, 3216908287,
+STORE, 3215831040, 3215835135,
+STORE, 3215835136, 3216908287,
+SNULL, 3220176895, 3221217279,
+STORE, 3220172800, 3220176895,
+STORE, 3220176896, 3221217279,
+STORE, 3214786560, 3215826943,
+STORE, 3213733888, 3214786559,
+SNULL, 3213733888, 3213737983,
+STORE, 3213737984, 3214786559,
+STORE, 3213733888, 3213737983,
+SNULL, 3214782463, 3214786559,
+STORE, 3213737984, 3214782463,
+STORE, 3214782464, 3214786559,
+STORE, 4038533120, 4038541311,
+STORE, 3948421120, 3948441599,
+SNULL, 3948425215, 3948441599,
+STORE, 3948421120, 3948425215,
+STORE, 3948425216, 3948441599,
+SNULL, 3213742079, 3214782463,
+STORE, 3213737984, 3213742079,
+STORE, 3213742080, 3214782463,
+STORE, 4038209536, 4038217727,
+STORE, 3212681216, 3213737983,
+SNULL, 3212681216, 3212685311,
+STORE, 3212685312, 3213737983,
+STORE, 3212681216, 3212685311,
+SNULL, 3213729791, 3213737983,
+STORE, 3212685312, 3213729791,
+STORE, 3213729792, 3213737983,
+STORE, 3795763200, 3795894271,
+STORE, 3946872832, 3946893311,
+SNULL, 3946876927, 3946893311,
+STORE, 3946872832, 3946876927,
+STORE, 3946876928, 3946893311,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+STORE, 3487174656, 3487584255,
+SNULL, 3212689407, 3213729791,
+STORE, 3212685312, 3212689407,
+STORE, 3212689408, 3213729791,
+STORE, 4041080832, 4041084927,
+STORE, 4040941568, 4040945663,
+STORE, 4037361664, 4037369855,
+STORE, 4000817152, 4000821247,
+STORE, 3999440896, 3999444991,
+STORE, 3212161024, 3212681215,
+SNULL, 3212161024, 3212439551,
+STORE, 3212439552, 3212681215,
+STORE, 3212161024, 3212439551,
+SNULL, 3212161024, 3212439551,
+SNULL, 3212464127, 3212681215,
+STORE, 3212439552, 3212464127,
+STORE, 3212464128, 3212681215,
+SNULL, 3212464128, 3212681215,
+SNULL, 3212439552, 3212451839,
+STORE, 3212451840, 3212464127,
+STORE, 3212439552, 3212451839,
+SNULL, 3212439552, 3212451839,
+STORE, 3212439552, 3212451839,
+SNULL, 3212451840, 3212455935,
+STORE, 3212455936, 3212464127,
+STORE, 3212451840, 3212455935,
+SNULL, 3212451840, 3212455935,
+STORE, 3212451840, 3212455935,
+SNULL, 3212455936, 3212460031,
+STORE, 3212460032, 3212464127,
+STORE, 3212455936, 3212460031,
+SNULL, 3212455936, 3212460031,
+STORE, 3212455936, 3212460031,
+SNULL, 3212460032, 3212464127,
+STORE, 3212460032, 3212464127,
+STORE, 3997679616, 3997683711,
+SNULL, 4049235968, 4049240063,
+STORE, 4049240064, 4049244159,
+STORE, 4049235968, 4049240063,
+SNULL, 4049240064, 4049244159,
+STORE, 4049240064, 4049244159,
+SNULL, 3997679616, 3997683711,
+SNULL, 3999440896, 3999444991,
+SNULL, 4000817152, 4000821247,
+SNULL, 4040941568, 4040945663,
+SNULL, 4041080832, 4041084927,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 3487174656, 3487584255,
+SNULL, 3212451840, 3212455935,
+STORE, 3212451840, 3212455935,
+STORE, 4041080832, 4041084927,
+STORE, 3623890944, 3624169471,
+SNULL, 4041080832, 4041084927,
+STORE, 4041080832, 4041084927,
+SNULL, 4041080832, 4041084927,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+STORE, 4041080832, 4041084927,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+STORE, 3211386880, 3212439551,
+SNULL, 3211386880, 3211390975,
+STORE, 3211390976, 3212439551,
+STORE, 3211386880, 3211390975,
+SNULL, 3212435455, 3212439551,
+STORE, 3211390976, 3212435455,
+STORE, 3212435456, 3212439551,
+STORE, 4040941568, 4040945663,
+STORE, 3937169408, 3937189887,
+STORE, 3623485440, 3623616511,
+SNULL, 717225983, 1388314623,
+STORE, 314572800, 717225983,
+STORE, 717225984, 1388314623,
+SNULL, 717225984, 1388314623,
+STORE, 3937112064, 3937132543,
+SNULL, 3937116159, 3937132543,
+STORE, 3937112064, 3937116159,
+STORE, 3937116160, 3937132543,
+SNULL, 3211395071, 3212435455,
+STORE, 3211390976, 3211395071,
+STORE, 3211395072, 3212435455,
+STORE, 4000817152, 4000821247,
+STORE, 3974823936, 3974832127,
+STORE, 3595284480, 3595431935,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+STORE, 3487174656, 3487584255,
+STORE, 3999440896, 3999444991,
+STORE, 3997679616, 3997683711,
+STORE, 3996295168, 3996299263,
+STORE, 3996090368, 3996094463,
+STORE, 3210866688, 3211386879,
+SNULL, 3210866688, 3211001855,
+STORE, 3211001856, 3211386879,
+STORE, 3210866688, 3211001855,
+SNULL, 3210866688, 3211001855,
+SNULL, 3211038719, 3211386879,
+STORE, 3211001856, 3211038719,
+STORE, 3211038720, 3211386879,
+SNULL, 3211038720, 3211386879,
+SNULL, 3211001856, 3211022335,
+STORE, 3211022336, 3211038719,
+STORE, 3211001856, 3211022335,
+SNULL, 3211001856, 3211022335,
+STORE, 3211001856, 3211022335,
+SNULL, 3211022336, 3211030527,
+STORE, 3211030528, 3211038719,
+STORE, 3211022336, 3211030527,
+SNULL, 3211022336, 3211030527,
+STORE, 3211022336, 3211030527,
+SNULL, 3211030528, 3211034623,
+STORE, 3211034624, 3211038719,
+STORE, 3211030528, 3211034623,
+SNULL, 3211030528, 3211034623,
+STORE, 3211030528, 3211034623,
+SNULL, 3211034624, 3211038719,
+STORE, 3211034624, 3211038719,
+STORE, 3994906624, 3994910719,
+SNULL, 4049240064, 4049244159,
+STORE, 4049240064, 4049244159,
+SNULL, 3994906624, 3994910719,
+SNULL, 3996090368, 3996094463,
+SNULL, 3996295168, 3996299263,
+SNULL, 3997679616, 3997683711,
+SNULL, 3999440896, 3999444991,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 3487174656, 3487584255,
+SNULL, 3211022336, 3211030527,
+STORE, 3211022336, 3211030527,
+STORE, 3999440896, 3999444991,
+STORE, 3210199040, 3211001855,
+SNULL, 3999440896, 3999444991,
+STORE, 3999440896, 3999444991,
+SNULL, 3999440896, 3999444991,
+STORE, 3594821632, 3594952703,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 4048183296, 4048592895,
+STORE, 4048592896, 4049002495,
+STORE, 4048183296, 4048592895,
+STORE, 4048183296, 4049002495,
+SNULL, 1914101759, 1969434623,
+STORE, 1914097664, 1914101759,
+STORE, 1914101760, 1969434623,
+STORE, 3567108096, 3567239167,
+STORE, 3973832704, 3973840895,
+STORE, 3209113600, 3210199039,
+SNULL, 3209113600, 3209117695,
+STORE, 3209117696, 3210199039,
+STORE, 3209113600, 3209117695,
+SNULL, 3210194943, 3210199039,
+STORE, 3209117696, 3210194943,
+STORE, 3210194944, 3210199039,
+STORE, 3935858688, 3935879167,
+SNULL, 3935862783, 3935879167,
+STORE, 3935858688, 3935862783,
+STORE, 3935862784, 3935879167,
+SNULL, 3209121791, 3210194943,
+STORE, 3209117696, 3209121791,
+STORE, 3209121792, 3210194943,
+STORE, 3528749056, 3528880127,
+STORE, 3968200704, 3968208895,
+STORE, 3208028160, 3209117695,
+SNULL, 3208028160, 3208032255,
+STORE, 3208032256, 3209117695,
+STORE, 3208028160, 3208032255,
+SNULL, 3209109503, 3209117695,
+STORE, 3208032256, 3209109503,
+STORE, 3209109504, 3209117695,
+STORE, 3888123904, 3888144383,
+SNULL, 3888127999, 3888144383,
+STORE, 3888123904, 3888127999,
+STORE, 3888128000, 3888144383,
+SNULL, 3208036351, 3209109503,
+STORE, 3208032256, 3208036351,
+STORE, 3208036352, 3209109503,
+SNULL, 3968200704, 3968208895,
+SNULL, 3888123904, 3888144383,
+SNULL, 3209109504, 3209113599,
+STORE, 3209113600, 3209117695,
+STORE, 3209109504, 3209113599,
+SNULL, 3208028160, 3209113599,
+STORE, 3208060928, 3209117695,
+SNULL, 3208060928, 3208065023,
+STORE, 3208065024, 3209117695,
+STORE, 3208060928, 3208065023,
+SNULL, 3209109503, 3209117695,
+STORE, 3208065024, 3209109503,
+STORE, 3209109504, 3209117695,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3888123904, 3888144383,
+SNULL, 3888127999, 3888144383,
+STORE, 3888123904, 3888127999,
+STORE, 3888128000, 3888144383,
+SNULL, 3208069119, 3209109503,
+STORE, 3208065024, 3208069119,
+STORE, 3208069120, 3209109503,
+STORE, 3968200704, 3968208895,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3527778304, 3527909375,
+STORE, 3999440896, 3999444991,
+STORE, 3997679616, 3997683711,
+STORE, 1914097664, 1914105855,
+STORE, 1914105856, 1969434623,
+STORE, 3957583872, 3957592063,
+STORE, 3206975488, 3208065023,
+SNULL, 3206975488, 3206979583,
+STORE, 3206979584, 3208065023,
+STORE, 3206975488, 3206979583,
+SNULL, 3208056831, 3208065023,
+STORE, 3206979584, 3208056831,
+STORE, 3208056832, 3208065023,
+STORE, 3956736000, 3956744191,
+STORE, 3205890048, 3206979583,
+SNULL, 3205890048, 3205894143,
+STORE, 3205894144, 3206979583,
+STORE, 3205890048, 3205894143,
+SNULL, 3206971391, 3206979583,
+STORE, 3205894144, 3206971391,
+STORE, 3206971392, 3206979583,
+STORE, 3806101504, 3806121983,
+SNULL, 3806105599, 3806121983,
+STORE, 3806101504, 3806105599,
+STORE, 3806105600, 3806121983,
+SNULL, 3206983679, 3208056831,
+STORE, 3206979584, 3206983679,
+STORE, 3206983680, 3208056831,
+STORE, 3806081024, 3806101503,
+SNULL, 3806085119, 3806101503,
+STORE, 3806081024, 3806085119,
+STORE, 3806085120, 3806101503,
+SNULL, 3205898239, 3206971391,
+STORE, 3205894144, 3205898239,
+STORE, 3205898240, 3206971391,
+STORE, 3956015104, 3956023295,
+STORE, 3204804608, 3205894143,
+SNULL, 3204804608, 3204808703,
+STORE, 3204808704, 3205894143,
+STORE, 3204804608, 3204808703,
+SNULL, 3205885951, 3205894143,
+STORE, 3204808704, 3205885951,
+STORE, 3205885952, 3205894143,
+STORE, 3803471872, 3803492351,
+STORE, 3803451392, 3803471871,
+STORE, 3803451392, 3803492351,
+SNULL, 3957583872, 3957592063,
+SNULL, 3806101504, 3806121983,
+SNULL, 3206975487, 3206979583,
+STORE, 3206971392, 3206975487,
+STORE, 3206975488, 3206979583,
+SNULL, 3208056832, 3208060927,
+STORE, 3208060928, 3208065023,
+STORE, 3208056832, 3208060927,
+SNULL, 3206975488, 3208060927,
+STORE, 3801845760, 3801878527,
+STORE, 3806101504, 3806121983,
+SNULL, 3806105599, 3806121983,
+STORE, 3806101504, 3806105599,
+STORE, 3806105600, 3806121983,
+SNULL, 3204812799, 3205885951,
+STORE, 3204808704, 3204812799,
+STORE, 3204812800, 3205885951,
+STORE, 1914097664, 1914109951,
+STORE, 1914109952, 1969434623,
+STORE, 3957583872, 3957592063,
+STORE, 3206971392, 3208065023,
+SNULL, 3206971392, 3206979583,
+STORE, 3206979584, 3208065023,
+STORE, 3206971392, 3206979583,
+SNULL, 3208056831, 3208065023,
+STORE, 3206979584, 3208056831,
+STORE, 3208056832, 3208065023,
+STORE, 3801825280, 3801845759,
+SNULL, 3801829375, 3801845759,
+STORE, 3801825280, 3801829375,
+STORE, 3801829376, 3801845759,
+SNULL, 3206983679, 3208056831,
+STORE, 3206979584, 3206983679,
+STORE, 3206983680, 3208056831,
+STORE, 3202707456, 3204804607,
+SNULL, 3202707456, 3204804607,
+STORE, 3202707456, 3204804607,
+STORE, 3200610304, 3202707455,
+SNULL, 3202707456, 3204804607,
+SNULL, 3200610304, 3202707455,
+STORE, 3202707456, 3204804607,
+SNULL, 3202707456, 3204804607,
+STORE, 3202707456, 3204804607,
+SNULL, 3202707456, 3204804607,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3527647232, 3527778303,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+STORE, 3487059968, 3487584255,
+SNULL, 3487059968, 3487301631,
+STORE, 3487301632, 3487584255,
+STORE, 3487059968, 3487301631,
+SNULL, 3487059968, 3487301631,
+SNULL, 3487563775, 3487584255,
+STORE, 3487301632, 3487563775,
+STORE, 3487563776, 3487584255,
+SNULL, 3487563776, 3487584255,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3524046848, 3524177919,
+STORE, 3487170560, 3487301631,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3487039488, 3487170559,
+STORE, 3487039488, 3487301631,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3204280320, 3204804607,
+SNULL, 3204280320, 3204448255,
+STORE, 3204448256, 3204804607,
+STORE, 3204280320, 3204448255,
+SNULL, 3204280320, 3204448255,
+SNULL, 3204710399, 3204804607,
+STORE, 3204448256, 3204710399,
+STORE, 3204710400, 3204804607,
+SNULL, 3204710400, 3204804607,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3996295168, 3996299263,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+SNULL, 3996295168, 3996299263,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3486908416, 3487039487,
+STORE, 3486908416, 3487301631,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3223326720, 3290435583,
+SNULL, 3223326720, 3256881151,
+STORE, 3256881152, 3290435583,
+STORE, 3223326720, 3256881151,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+STORE, 3201826816, 3202351103,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+STORE, 3202351104, 3204448255,
+SNULL, 3202351104, 3204448255,
+SNULL, 3803471871, 3803492351,
+STORE, 3803451392, 3803471871,
+STORE, 3803471872, 3803492351,
+SNULL, 3803471872, 3803492351,
+SNULL, 3803451392, 3803471871,
+STORE, 3798999040, 3799101439,
+SNULL, 3798999040, 3799101439,
+STORE, 3952644096, 3952652287,
+STORE, 3203362816, 3204448255,
+SNULL, 3203362816, 3203366911,
+STORE, 3203366912, 3204448255,
+STORE, 3203362816, 3203366911,
+SNULL, 3204444159, 3204448255,
+STORE, 3203366912, 3204444159,
+STORE, 3204444160, 3204448255,
+STORE, 3803471872, 3803492351,
+SNULL, 3803475967, 3803492351,
+STORE, 3803471872, 3803475967,
+STORE, 3803475968, 3803492351,
+SNULL, 3203371007, 3204444159,
+STORE, 3203366912, 3203371007,
+STORE, 3203371008, 3204444159,
+STORE, 3199729664, 3201826815,
+SNULL, 3199729664, 3201826815,
+STORE, 3199729664, 3201826815,
+SNULL, 3199729664, 3201826815,
+STORE, 3199729664, 3201826815,
+SNULL, 3199729664, 3201826815,
+STORE, 3199729664, 3201826815,
+SNULL, 3199729664, 3201826815,
+STORE, 3199729664, 3201826815,
+SNULL, 3199729664, 3201826815,
+STORE, 3200774144, 3201826815,
+SNULL, 3200774144, 3200778239,
+STORE, 3200778240, 3201826815,
+STORE, 3200774144, 3200778239,
+SNULL, 3201822719, 3201826815,
+STORE, 3200778240, 3201822719,
+STORE, 3201822720, 3201826815,
+STORE, 3803451392, 3803471871,
+SNULL, 3803455487, 3803471871,
+STORE, 3803451392, 3803455487,
+STORE, 3803455488, 3803471871,
+SNULL, 3200782335, 3201822719,
+STORE, 3200778240, 3200782335,
+STORE, 3200782336, 3201822719,
+STORE, 3949666304, 3949674495,
+STORE, 3949408256, 3949416447,
+STORE, 3199688704, 3200778239,
+SNULL, 3199688704, 3199692799,
+STORE, 3199692800, 3200778239,
+STORE, 3199688704, 3199692799,
+SNULL, 3200770047, 3200778239,
+STORE, 3199692800, 3200770047,
+STORE, 3200770048, 3200778239,
+STORE, 3799306240, 3799326719,
+SNULL, 3799310335, 3799326719,
+STORE, 3799306240, 3799310335,
+STORE, 3799310336, 3799326719,
+SNULL, 3199696895, 3200770047,
+STORE, 3199692800, 3199696895,
+STORE, 3199696896, 3200770047,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+STORE, 3799277568, 3799306239,
+SNULL, 3799277568, 3799306239,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+SNULL, 4041162751, 4041170943,
+STORE, 4041154560, 4041162751,
+STORE, 4041162752, 4041170943,
+SNULL, 4041162752, 4041170943,
+SNULL, 4041154560, 4041162751,
+SNULL, 4041191424, 4041211903,
+SNULL, 4041170944, 4041191423,
+SNULL, 3626471423, 3626475519,
+STORE, 3626467328, 3626471423,
+STORE, 3626471424, 3626475519,
+SNULL, 3626471424, 3627524095,
+SNULL, 3625418751, 3625422847,
+STORE, 3625414656, 3625418751,
+STORE, 3625418752, 3625422847,
+SNULL, 3625418752, 3626471423,
+STORE, 3627393024, 3627524095,
+STORE, 3627261952, 3627393023,
+STORE, 3627261952, 3627524095,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+STORE, 3195494400, 3197591551,
+SNULL, 3197591552, 3199688703,
+SNULL, 3195494400, 3197591551,
+STORE, 3197591552, 3199688703,
+SNULL, 3197591552, 3199688703,
+STORE, 3197591552, 3199688703,
+STORE, 3195494400, 3197591551,
+SNULL, 3197591552, 3199688703,
+SNULL, 3195494400, 3197591551,
+STORE, 3798999040, 3799101439,
+SNULL, 3798999040, 3799101439,
+/*
+ * mmap: unmapped_area_topdown: ffff9a9f14ddaa80
+ * Gap was found: mt 4041162752 gap_end 4041183232
+ * mmap: window was 4052029440 - 4096 size 28672
+ * mmap: mas.min 4041154560 max 4041191423 mas.last 4041191423
+ * mmap: mas.index 4041162752 align mask 0 offset 0
+ * mmap: rb_find_vma find on 4041162752 => ffff9a9f03d19678 (ffff9a9f03d19678)
+ */
+ };
+
+ unsigned long set43[] = {
+STORE, 140737488347136, 140737488351231,
+STORE, 140734187720704, 140737488351231,
+SNULL, 140734187724800, 140737488351231,
+STORE, 140734187589632, 140734187724799,
+STORE, 4194304, 6443007,
+STORE, 4337664, 6443007,
+STORE, 4194304, 4337663,
+SNULL, 4337664, 6443007,
+STORE, 6430720, 6443007,
+STORE, 206158430208, 206160674815,
+STORE, 206158569472, 206160674815,
+STORE, 206158430208, 206158569471,
+SNULL, 206158569472, 206160674815,
+STORE, 206160662528, 206160670719,
+STORE, 206160670720, 206160674815,
+STORE, 140734188756992, 140734188765183,
+STORE, 140734188740608, 140734188756991,
+STORE, 140501948112896, 140501948116991,
+ };
+
+ int count = 0;
+ void *ptr = NULL;
+
+ MA_STATE(mas, mt, 0, 0);
+
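+	/* Replay each recorded mmap trace (set, set2, ...) and validate the tree. */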
+ mt_set_non_kernel(3);
+ check_erase2_testset(mt, set, ARRAY_SIZE(set));
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
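+	/* set2: the replay must leave 140735933894656-140735933906943 empty. */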
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set2, ARRAY_SIZE(set2));
+ start = 140735933894656;
+ MT_BUG_ON(mt, !!mt_find(mt, &start, 140735933906943UL));
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(2);
+ mt_init_flags(mt, 0);
+ check_erase2_testset(mt, set3, ARRAY_SIZE(set3));
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, 0);
+ check_erase2_testset(mt, set4, ARRAY_SIZE(set4));
+ rcu_read_lock();
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (xa_is_zero(entry))
+ continue;
+ }
+ rcu_read_unlock();
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mt_set_non_kernel(100);
+ check_erase2_testset(mt, set5, ARRAY_SIZE(set5));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set6, ARRAY_SIZE(set6));
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set7, ARRAY_SIZE(set7));
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set8, ARRAY_SIZE(set8));
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set9, ARRAY_SIZE(set9));
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set10, ARRAY_SIZE(set10));
+ rcu_barrier();
+ mtree_destroy(mt);
+
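+	/*
+	 * set11: reverse-search for an 0x2000 gap below 140014592737280; the
+	 * gap found must end at 140014592573439.
+	 */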
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set11, ARRAY_SIZE(set11));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 12288, 140014592737280, 0x2000);
+ MT_BUG_ON(mt, mas.last != 140014592573439);
+ mtree_destroy(mt);
+
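+	/* set12: walk every entry; guard against iterating past the expected count. */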
+ mas_reset(&mas);
+ mas.tree = mt;
+ count = 0;
+ mas.index = 0;
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set12, ARRAY_SIZE(set12));
+ rcu_barrier();
+ mas_for_each(&mas, entry, ULONG_MAX) {
+ if (xa_is_zero(entry))
+ continue;
+ BUG_ON(count > 12);
+ count++;
+ }
+ mtree_destroy(mt);
+
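+	/* set13: erase 140373516443648, then reverse-search for a 4096 gap. */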
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set13, ARRAY_SIZE(set13));
+ mtree_erase(mt, 140373516443648);
+ rcu_read_lock();
+ mas_empty_area_rev(&mas, 0, 140373518663680, 4096);
+ rcu_read_unlock();
+ mtree_destroy(mt);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set14, ARRAY_SIZE(set14));
+ rcu_barrier();
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set15, ARRAY_SIZE(set15));
+ rcu_barrier();
+ mtree_destroy(mt);
+
+	/* set16 found a bug in limit updating at slot 0. */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set16, ARRAY_SIZE(set16));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 139921865637888, 0x6000);
+ MT_BUG_ON(mt, mas.last != 139921865547775);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ /*
+ * set17 found a bug in walking backwards and not counting nulls at
+ * the end. This could cause a gap to be missed if the null had any
+ * size.
+ */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set17, ARRAY_SIZE(set17));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 139953197334528, 0x1000);
+ MT_BUG_ON(mt, mas.last != 139953197322239);
+/* MT_BUG_ON(mt, mas.index != 139953197318144); */
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ /*
+ * set18 found a bug in walking backwards and not setting the max from
+ * the node, but using the parent node. This was only an issue if the
+ * next slot in the parent had what we needed.
+ */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set18, ARRAY_SIZE(set18));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 140222972858368, 2215936);
+ MT_BUG_ON(mt, mas.last != 140222968475647);
+ /*MT_BUG_ON(mt, mas.index != 140222966259712); */
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ /*
+ * set19 found 2 bugs in prev.
+ * 1. If we hit root without finding anything, then there was an
+ * infinite loop.
+	 * 2. The first ascent wasn't using the correct slot, which may have
+	 *    caused missed entries.
+ */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set19, ARRAY_SIZE(set19));
+ rcu_barrier();
+ mas.index = 140656779083776;
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(140656779083776));
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != xa_mk_value(140656766251008));
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ /*
+ * set20 found a bug in mas_may_move_gap due to the slot being
+ * overwritten during the __mas_add operation and setting it to zero.
+ */
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set20, ARRAY_SIZE(set20));
+ rcu_barrier();
+ check_load(mt, 94849009414144, NULL);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set21, ARRAY_SIZE(set21));
+ rcu_barrier();
+ mt_validate(mt);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(999);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set22, ARRAY_SIZE(set22));
+ rcu_barrier();
+ mt_validate(mt);
+ ptr = mtree_load(mt, 140551363362816);
+ MT_BUG_ON(mt, ptr == mtree_load(mt, 140551363420159));
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set23, ARRAY_SIZE(set23));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set24, ARRAY_SIZE(set24));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set25, ARRAY_SIZE(set25));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* Split on NULL followed by delete - causes gap issues. */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set26, ARRAY_SIZE(set26));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 140109042671616, 409600);
+ MT_BUG_ON(mt, mas.last != 140109040959487);
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* Split on NULL followed by delete - causes gap issues. */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set27, ARRAY_SIZE(set27));
+ rcu_barrier();
+ MT_BUG_ON(mt, 0 != mtree_load(mt, 140415537422336));
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set28, ARRAY_SIZE(set28));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 139918413357056, 2097152);
+	/* Search for the size of the gap, then align it (offset 0). */
+ mas.index = (mas.last + 1 - 2097152 - 0) & (~2093056);
+ MT_BUG_ON(mt, mas.index != 139918401601536);
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+	/*
+	 * This test found issues with retrying the move of rebalanced nodes,
+	 * which caused the incorrect parent pivot to be updated.
+	 */
+ mt_set_non_kernel(999);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set29, ARRAY_SIZE(set29));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+	/*
+	 * This test found issues with deleting all entries in a node when
+	 * surrounded by entries in the next nodes, then deleting the entries
+	 * surrounding the node filled with deleted entries.
+	 */
+ mt_set_non_kernel(999);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set30, ARRAY_SIZE(set30));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+	/*
+	 * This test found an issue when deleting all entries in an end node:
+	 * mas_gap incorrectly set next = curr and curr = prev, then moved
+	 * next to the left, losing data.
+	 */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set31, ARRAY_SIZE(set31));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set32, ARRAY_SIZE(set32));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+/*
+ * mmap: empty_area_topdown: ffff88821c9cb600 Gap was found:
+ * mt 140582827569152 gap_end 140582869532672
+ * mmap: window was 140583656296448 - 4096 size 134217728
+ * mmap: mas.min 94133881868288 max 140582961786879 mas.last 140582961786879
+ * mmap: mas.index 140582827569152 align mask 0 offset 0
+ * mmap: rb_find_vma find on
+ * 140582827569152 => ffff88821c5bad00 (ffff88821c5bad00)
+ */
+
+ /* move gap failed due to an entirely empty node */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set33, ARRAY_SIZE(set33));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 140583656296448, 134217728);
+ MT_BUG_ON(mt, mas.last != 140583003750399);
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /*
+ * Incorrect gap in tree caused by mas_prev not setting the limits
+ * correctly while walking down.
+ */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set34, ARRAY_SIZE(set34));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* Empty leaf at the end of a parent caused incorrect gap. */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set35, ARRAY_SIZE(set35));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(99);
+ /* Empty leaf at the end of a parent caused incorrect gap. */
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set36, ARRAY_SIZE(set36));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set37, ARRAY_SIZE(set37));
+ rcu_barrier();
+ MT_BUG_ON(mt, 0 != mtree_load(mt, 94637033459712));
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set38, ARRAY_SIZE(set38));
+ rcu_barrier();
+ MT_BUG_ON(mt, 0 != mtree_load(mt, 94637033459712));
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set39, ARRAY_SIZE(set39));
+ rcu_barrier();
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set40, ARRAY_SIZE(set40));
+ rcu_barrier();
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set41, ARRAY_SIZE(set41));
+ rcu_barrier();
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* move gap failed due to an entirely empty node. */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set42, ARRAY_SIZE(set42));
+ rcu_barrier();
+ mas_empty_area_rev(&mas, 4096, 4052029440, 28672);
+ MT_BUG_ON(mt, mas.last != 4041211903);
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* gap calc off by one */
+ mt_set_non_kernel(99);
+ mas_reset(&mas);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_erase2_testset(mt, set43, ARRAY_SIZE(set43));
+ rcu_barrier();
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ mtree_destroy(mt);
+}
+
+static noinline void check_alloc_rev_range(struct maple_tree *mt)
+{
+ /*
+ * Generated by:
+ * cat /proc/self/maps | awk '{print $1}'|
+ * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+ */
+
+ unsigned long range[] = {
+ /* Inclusive , Exclusive. */
+ 0x565234af2000, 0x565234af4000,
+ 0x565234af4000, 0x565234af9000,
+ 0x565234af9000, 0x565234afb000,
+ 0x565234afc000, 0x565234afd000,
+ 0x565234afd000, 0x565234afe000,
+ 0x565235def000, 0x565235e10000,
+ 0x7f36d4bfd000, 0x7f36d4ee2000,
+ 0x7f36d4ee2000, 0x7f36d4f04000,
+ 0x7f36d4f04000, 0x7f36d504c000,
+ 0x7f36d504c000, 0x7f36d5098000,
+ 0x7f36d5098000, 0x7f36d5099000,
+ 0x7f36d5099000, 0x7f36d509d000,
+ 0x7f36d509d000, 0x7f36d509f000,
+ 0x7f36d509f000, 0x7f36d50a5000,
+ 0x7f36d50b9000, 0x7f36d50db000,
+ 0x7f36d50db000, 0x7f36d50dc000,
+ 0x7f36d50dc000, 0x7f36d50fa000,
+ 0x7f36d50fa000, 0x7f36d5102000,
+ 0x7f36d5102000, 0x7f36d5103000,
+ 0x7f36d5103000, 0x7f36d5104000,
+ 0x7f36d5104000, 0x7f36d5105000,
+ 0x7fff5876b000, 0x7fff5878d000,
+ 0x7fff5878e000, 0x7fff58791000,
+ 0x7fff58791000, 0x7fff58793000,
+ };
+
+ unsigned long holes[] = {
+ /*
+ * Note: start of hole is INCLUSIVE
+ * end of hole is EXCLUSIVE
+		 *        (unlike the ranges above, which are inserted inclusive-inclusive.)
+ * Start of hole, end of hole, size of hole (+1)
+ */
+ 0x565234afb000, 0x565234afc000, 0x1000,
+ 0x565234afe000, 0x565235def000, 0x12F1000,
+ 0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000,
+ };
+
+	/*
+	 * Each req_range entry consists of 5 values:
+	 * 1. min index
+	 * 2. max index
+	 * 3. size
+	 * 4. address that should be returned
+	 * 5. expected return value
+	 */
+ unsigned long req_range[] = {
+ 0x565234af9000, /* Min */
+ 0x7fff58791000, /* Max */
+ 0x1000, /* Size */
+ 0x7fff5878d << 12, /* First rev hole of size 0x1000 */
+ 0, /* Return value success. */
+
+ 0x0, /* Min */
+ 0x565234AF1 << 12, /* Max */
+ 0x3000, /* Size */
+ 0x565234AEE << 12, /* max - 3. */
+ 0, /* Return value success. */
+
+ 0x0, /* Min */
+ -1, /* Max */
+ 0x1000, /* Size */
+		562949953421311 << 12,	/* First rev hole of size 0x1000 */
+ 0, /* Return value success. */
+
+ 0x0, /* Min */
+ 0x7F36D510A << 12, /* Max */
+ 0x4000, /* Size */
+ 0x7F36D5106 << 12, /* First rev hole of size 0x4000 */
+ 0, /* Return value success. */
+
+ /* Ascend test. */
+ 0x0,
+ 34148798629 << 12,
+ 19 << 12,
+ 34148797418 << 12,
+ 0x0,
+
+ /* Too big test. */
+ 0x0,
+ 18446744073709551615UL,
+ 562915594369134UL << 12,
+ 0x0,
+ -EBUSY,
+
+ };
+
+ int i, range_count = ARRAY_SIZE(range);
+ int req_range_count = ARRAY_SIZE(req_range);
+ unsigned long min = 0;
+
+ MA_STATE(mas, mt, 0, 0);
+
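+	/*
+	 * Occupy MTREE_ALLOC_MAX..ULONG_MAX with a zero entry so the top of
+	 * the range is never reported as a gap.
+	 */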
+ mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+#define DEBUG_REV_RANGE 0
+ for (i = 0; i < range_count; i += 2) {
+ /* Inclusive, Inclusive (with the -1) */
+
+#if DEBUG_REV_RANGE
+ pr_debug("\t%s: Insert %lu-%lu\n", __func__, range[i] >> 12,
+ (range[i + 1] >> 12) - 1);
+#endif
+ check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
+ xa_mk_value(range[i] >> 12), 0);
+ mt_validate(mt);
+ }
+
+
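+	/* Each expected hole must be found by a reverse gap search. */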
+ for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
+#if DEBUG_REV_RANGE
+ pr_debug("Search from %lu-%lu for gap %lu should be at %lu\n",
+ min, holes[i+1]>>12, holes[i+2]>>12,
+ holes[i] >> 12);
+#endif
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, min,
+ holes[i+1] >> 12,
+ holes[i+2] >> 12));
+#if DEBUG_REV_RANGE
+ pr_debug("Found %lu %lu\n", mas.index, mas.last);
+ pr_debug("gap %lu %lu\n", (holes[i] >> 12),
+ (holes[i+1] >> 12));
+#endif
+ MT_BUG_ON(mt, mas.last + 1 != (holes[i+1] >> 12));
+ MT_BUG_ON(mt, mas.index != (holes[i+1] >> 12) - (holes[i+2] >> 12));
+ min = holes[i+1] >> 12;
+ mas_reset(&mas);
+ }
+
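+	/* Replay each reverse allocation request and check address and status. */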
+ for (i = 0; i < req_range_count; i += 5) {
+#if DEBUG_REV_RANGE
+ pr_debug("\tReverse request between %lu-%lu size %lu, should get %lu\n",
+ req_range[i] >> 12,
+ (req_range[i + 1] >> 12) - 1,
+ req_range[i+2] >> 12,
+ req_range[i+3] >> 12);
+#endif
+ check_mtree_alloc_rrange(mt,
+ req_range[i] >> 12, /* start */
+ req_range[i+1] >> 12, /* end */
+ req_range[i+2] >> 12, /* size */
+ req_range[i+3] >> 12, /* expected address */
+ req_range[i+4], /* expected return */
+ xa_mk_value(req_range[i] >> 12)); /* pointer */
+ mt_validate(mt);
+ }
+
+ mt_set_non_kernel(1);
+ mtree_erase(mt, 34148798727); /* create a deleted range. */
+ check_mtree_alloc_rrange(mt, 0, 34359052173, 210253414,
+ 34148798725, 0, mt);
+
+ mtree_destroy(mt);
+}
+
+static noinline void check_alloc_range(struct maple_tree *mt)
+{
+ /*
+ * Generated by:
+ * cat /proc/self/maps|awk '{print $1}'|
+ * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}'
+ */
+
+ unsigned long range[] = {
+ /* Inclusive , Exclusive. */
+ 0x565234af2000, 0x565234af4000,
+ 0x565234af4000, 0x565234af9000,
+ 0x565234af9000, 0x565234afb000,
+ 0x565234afc000, 0x565234afd000,
+ 0x565234afd000, 0x565234afe000,
+ 0x565235def000, 0x565235e10000,
+ 0x7f36d4bfd000, 0x7f36d4ee2000,
+ 0x7f36d4ee2000, 0x7f36d4f04000,
+ 0x7f36d4f04000, 0x7f36d504c000,
+ 0x7f36d504c000, 0x7f36d5098000,
+ 0x7f36d5098000, 0x7f36d5099000,
+ 0x7f36d5099000, 0x7f36d509d000,
+ 0x7f36d509d000, 0x7f36d509f000,
+ 0x7f36d509f000, 0x7f36d50a5000,
+ 0x7f36d50b9000, 0x7f36d50db000,
+ 0x7f36d50db000, 0x7f36d50dc000,
+ 0x7f36d50dc000, 0x7f36d50fa000,
+ 0x7f36d50fa000, 0x7f36d5102000,
+ 0x7f36d5102000, 0x7f36d5103000,
+ 0x7f36d5103000, 0x7f36d5104000,
+ 0x7f36d5104000, 0x7f36d5105000,
+ 0x7fff5876b000, 0x7fff5878d000,
+ 0x7fff5878e000, 0x7fff58791000,
+ 0x7fff58791000, 0x7fff58793000,
+ };
+ unsigned long holes[] = {
+ /* Start of hole, end of hole, size of hole (+1) */
+ 0x565234afb000, 0x565234afc000, 0x1000,
+ 0x565234afe000, 0x565235def000, 0x12F1000,
+ 0x565235e10000, 0x7f36d4bfd000, 0x28E49EDED000,
+ };
+
+	/*
+	 * Each req_range entry consists of 5 values:
+	 * 1. min index
+	 * 2. max index
+	 * 3. size
+	 * 4. address that should be returned
+	 * 5. expected return value
+	 */
+ unsigned long req_range[] = {
+ 0x565234af9000, /* Min */
+ 0x7fff58791000, /* Max */
+ 0x1000, /* Size */
+		0x565234afb000,	/* First hole in our data of size 0x1000. */
+ 0, /* Return value success. */
+
+ 0x0, /* Min */
+ 0x7fff58791000, /* Max */
+ 0x1F00, /* Size */
+		0x0,		/* First hole in our data that fits 0x1F00. */
+ 0, /* Return value success. */
+
+ /* Test ascend. */
+ 34148797436 << 12, /* Min */
+ 0x7fff587AF000, /* Max */
+ 0x3000, /* Size */
+ 34148798629 << 12, /* Expected location */
+ 0, /* Return value success. */
+
+ /* Test failing. */
+ 34148798623 << 12, /* Min */
+ 34148798683 << 12, /* Max */
+ 0x15000, /* Size */
+ 0, /* Expected location */
+ -EBUSY, /* Return value failed. */
+
+ /* Test filling entire gap. */
+ 34148798623 << 12, /* Min */
+ 0x7fff587AF000, /* Max */
+ 0x10000, /* Size */
+ 34148798632 << 12, /* Expected location */
+ 0, /* Return value success. */
+
+ /* Test walking off the end of root. */
+ 0, /* Min */
+ -1, /* Max */
+ -1, /* Size */
+ 0, /* Expected location */
+ -EBUSY, /* Return value failure. */
+
+ /* Test looking for too large a hole across entire range. */
+ 0, /* Min */
+ -1, /* Max */
+ 4503599618982063UL << 12, /* Size */
+ 34359052178 << 12, /* Expected location */
+ -EBUSY, /* Return failure. */
+ };
+ int i, range_count = ARRAY_SIZE(range);
+ int req_range_count = ARRAY_SIZE(req_range);
+ unsigned long min = 0x565234af2000;
+
+ mtree_store_range(mt, MTREE_ALLOC_MAX, ULONG_MAX, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+ for (i = 0; i < range_count; i += 2) {
+#define DEBUG_ALLOC_RANGE 0
+#if DEBUG_ALLOC_RANGE
+ pr_debug("\tInsert %lu-%lu\n", range[i] >> 12,
+ (range[i + 1] >> 12) - 1);
+ mt_dump(mt);
+#endif
+ check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1,
+ xa_mk_value(range[i] >> 12), 0);
+ mt_validate(mt);
+ }
+
+
+ MA_STATE(mas, mt, 0, 0);
+
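+	/* Each expected hole must be found, in order, by a forward gap search. */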
+ for (i = 0; i < ARRAY_SIZE(holes); i += 3) {
+
+#if DEBUG_ALLOC_RANGE
+ pr_debug("\tGet empty %lu-%lu size %lu (%lx-%lx)\n", min >> 12,
+ holes[i+1] >> 12, holes[i+2] >> 12,
+ min, holes[i+1]);
+#endif
+ MT_BUG_ON(mt, mas_empty_area(&mas, min >> 12,
+ holes[i+1] >> 12,
+ holes[i+2] >> 12));
+ MT_BUG_ON(mt, mas.index != holes[i] >> 12);
+ min = holes[i+1];
+ mas_reset(&mas);
+ }
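+	/* Replay each forward allocation request and check address and status. */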
+ for (i = 0; i < req_range_count; i += 5) {
+#if DEBUG_ALLOC_RANGE
+ pr_debug("\tTest %d: %lu-%lu size %lu expected %lu (%lu-%lu)\n",
+ i/5, req_range[i] >> 12, req_range[i + 1] >> 12,
+ req_range[i + 2] >> 12, req_range[i + 3] >> 12,
+ req_range[i], req_range[i+1]);
+#endif
+ check_mtree_alloc_range(mt,
+ req_range[i] >> 12, /* start */
+ req_range[i+1] >> 12, /* end */
+ req_range[i+2] >> 12, /* size */
+ req_range[i+3] >> 12, /* expected address */
+ req_range[i+4], /* expected return */
+ xa_mk_value(req_range[i] >> 12)); /* pointer */
+ mt_validate(mt);
+#if DEBUG_ALLOC_RANGE
+ mt_dump(mt);
+#endif
+ }
+
+ mtree_destroy(mt);
+}
+
+static noinline void check_ranges(struct maple_tree *mt)
+{
+ int i, val, val2;
+ unsigned long r[] = {
+ 10, 15,
+ 20, 25,
+ 17, 22, /* Overlaps previous range. */
+ 9, 1000, /* Huge. */
+ 100, 200,
+ 45, 168,
+ 118, 128,
+ };
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ check_insert_range(mt, r[0], r[1], xa_mk_value(r[0]), 0);
+ check_insert_range(mt, r[2], r[3], xa_mk_value(r[2]), 0);
+ check_insert_range(mt, r[4], r[5], xa_mk_value(r[4]), -EEXIST);
+ MT_BUG_ON(mt, !mt_height(mt));
+ /* Store */
+ check_store_range(mt, r[4], r[5], xa_mk_value(r[4]), 0);
+ check_store_range(mt, r[6], r[7], xa_mk_value(r[6]), 0);
+ check_store_range(mt, r[8], r[9], xa_mk_value(r[8]), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+ MT_BUG_ON(mt, mt_height(mt));
+
+ check_seq(mt, 50, false);
+ mt_set_non_kernel(4);
+ check_store_range(mt, 5, 47, xa_mk_value(47), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Create tree of 1-100 */
+ check_seq(mt, 100, false);
+ /* Store 45-168 */
+ mt_set_non_kernel(10);
+ check_store_range(mt, r[10], r[11], xa_mk_value(r[10]), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Create tree of 1-200 */
+ check_seq(mt, 200, false);
+ /* Store 45-168 */
+ check_store_range(mt, r[10], r[11], xa_mk_value(r[10]), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ check_seq(mt, 30, false);
+ check_store_range(mt, 6, 18, xa_mk_value(6), 0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Overwrite across multiple levels. */
+ /* Create tree of 1-400 */
+ check_seq(mt, 400, false);
+ mt_set_non_kernel(50);
+ /* Store 118-128 */
+ check_store_range(mt, r[12], r[13], xa_mk_value(r[12]), 0);
+ mt_set_non_kernel(50);
+ mtree_test_erase(mt, 140);
+ mtree_test_erase(mt, 141);
+ mtree_test_erase(mt, 142);
+ mtree_test_erase(mt, 143);
+ mtree_test_erase(mt, 130);
+ mtree_test_erase(mt, 131);
+ mtree_test_erase(mt, 132);
+ mtree_test_erase(mt, 133);
+ mtree_test_erase(mt, 134);
+ mtree_test_erase(mt, 135);
+ check_load(mt, r[12], xa_mk_value(r[12]));
+ check_load(mt, r[13], xa_mk_value(r[12]));
+ check_load(mt, r[13] - 1, xa_mk_value(r[12]));
+ check_load(mt, r[13] + 1, xa_mk_value(r[13] + 1));
+ check_load(mt, 135, NULL);
+ check_load(mt, 140, NULL);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+
+
+ /* Overwrite multiple levels at the end of the tree (slot 7) */
+ mt_set_non_kernel(50);
+ check_seq(mt, 400, false);
+ check_store_range(mt, 353, 361, xa_mk_value(353), 0);
+ check_store_range(mt, 347, 352, xa_mk_value(347), 0);
+
+ check_load(mt, 346, xa_mk_value(346));
+ for (i = 347; i <= 352; i++)
+ check_load(mt, i, xa_mk_value(347));
+ for (i = 353; i <= 361; i++)
+ check_load(mt, i, xa_mk_value(353));
+ check_load(mt, 362, xa_mk_value(362));
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(50);
+ check_seq(mt, 400, false);
+ check_store_range(mt, 352, 364, NULL, 0);
+ check_store_range(mt, 351, 363, xa_mk_value(352), 0);
+ check_load(mt, 350, xa_mk_value(350));
+ check_load(mt, 351, xa_mk_value(352));
+ for (i = 352; i <= 363; i++)
+ check_load(mt, i, xa_mk_value(352));
+ check_load(mt, 364, NULL);
+ check_load(mt, 365, xa_mk_value(365));
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_set_non_kernel(5);
+ check_seq(mt, 400, false);
+ check_store_range(mt, 352, 364, NULL, 0);
+ check_store_range(mt, 351, 364, xa_mk_value(352), 0);
+ check_load(mt, 350, xa_mk_value(350));
+ check_load(mt, 351, xa_mk_value(352));
+ for (i = 352; i <= 364; i++)
+ check_load(mt, i, xa_mk_value(352));
+ check_load(mt, 365, xa_mk_value(365));
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+
+ mt_set_non_kernel(50);
+ check_seq(mt, 400, false);
+ check_store_range(mt, 362, 367, xa_mk_value(362), 0);
+ check_store_range(mt, 353, 361, xa_mk_value(353), 0);
+ mt_set_non_kernel(0);
+ mt_validate(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+ /*
+ * Interesting cases:
+ * 1. Overwrite the end of a node and end in the first entry of the next
+ * node.
+ * 2. Split a single range
+ * 3. Overwrite the start of a range
+ * 4. Overwrite the end of a range
+ * 5. Overwrite the entire range
+ * 6. Overwrite a range that causes multiple parent nodes to be
+ * combined
+ * 7. Overwrite a range that causes multiple parent nodes and part of
+ * root to be combined
+ * 8. Overwrite the whole tree
+ * 9. Try to overwrite the zero entry of an alloc tree.
+	 * 10. Write a range larger than a node's current pivot
+ */
+
+ mt_set_non_kernel(50);
+ for (i = 0; i <= 500; i++) {
+ val = i*5;
+ val2 = (i+1)*5;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 2400, 2400, xa_mk_value(2400), 0);
+ check_store_range(mt, 2411, 2411, xa_mk_value(2411), 0);
+ check_store_range(mt, 2412, 2412, xa_mk_value(2412), 0);
+ check_store_range(mt, 2396, 2400, xa_mk_value(4052020), 0);
+ check_store_range(mt, 2402, 2402, xa_mk_value(2402), 0);
+ mtree_destroy(mt);
+ mt_set_non_kernel(0);
+
+ mt_set_non_kernel(50);
+ for (i = 0; i <= 500; i++) {
+ val = i*5;
+ val2 = (i+1)*5;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 2422, 2422, xa_mk_value(2422), 0);
+ check_store_range(mt, 2424, 2424, xa_mk_value(2424), 0);
+ check_store_range(mt, 2425, 2425, xa_mk_value(2), 0);
+ check_store_range(mt, 2460, 2470, NULL, 0);
+ check_store_range(mt, 2435, 2460, xa_mk_value(2435), 0);
+ check_store_range(mt, 2461, 2470, xa_mk_value(2461), 0);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ /* Test rebalance gaps */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mt_set_non_kernel(50);
+ for (i = 0; i <= 50; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 161, 161, xa_mk_value(161), 0);
+ check_store_range(mt, 162, 162, xa_mk_value(162), 0);
+ check_store_range(mt, 163, 163, xa_mk_value(163), 0);
+ check_store_range(mt, 240, 249, NULL, 0);
+ mtree_erase(mt, 200);
+ mtree_erase(mt, 210);
+ mtree_erase(mt, 220);
+ mtree_erase(mt, 230);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 500; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 4600, 4959, xa_mk_value(1), 0);
+ mt_validate(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 500; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ }
+ check_store_range(mt, 4811, 4811, xa_mk_value(4811), 0);
+ check_store_range(mt, 4812, 4812, xa_mk_value(4812), 0);
+ check_store_range(mt, 4861, 4861, xa_mk_value(4861), 0);
+ check_store_range(mt, 4862, 4862, xa_mk_value(4862), 0);
+ check_store_range(mt, 4842, 4849, NULL, 0);
+ mt_validate(mt);
+ MT_BUG_ON(mt, !mt_height(mt));
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 1300; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ }
+ /* Cause a 3 child split all the way up the tree. */
+ for (i = 5; i < 215; i += 10)
+ check_store_range(mt, 11450 + i, 11450 + i + 1, NULL, 0);
+ for (i = 5; i < 65; i += 10)
+ check_store_range(mt, 11770 + i, 11770 + i + 1, NULL, 0);
+
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ for (i = 5; i < 45; i += 10)
+ check_store_range(mt, 11700 + i, 11700 + i + 1, NULL, 0);
+ MT_BUG_ON(mt, mt_height(mt) < 4);
+ mtree_destroy(mt);
+
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 1200; i++) {
+ val = i*10;
+ val2 = (i+1)*10;
+ check_store_range(mt, val, val2, xa_mk_value(val), 0);
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ }
+ /* Fill parents and leaves before split. */
+ for (i = 5; i < 455; i += 10)
+ check_store_range(mt, 7800 + i, 7800 + i + 1, NULL, 0);
+
+ for (i = 1; i < 16; i++)
+ check_store_range(mt, 8185 + i, 8185 + i + 1,
+ xa_mk_value(8185+i), 0);
+ MT_BUG_ON(mt, mt_height(mt) >= 4);
+ /* triple split across multiple levels. */
+ check_store_range(mt, 8184, 8184, xa_mk_value(8184), 0);
+ MT_BUG_ON(mt, mt_height(mt) != 4);
+}
+
+static noinline void check_next_entry(struct maple_tree *mt)
+{
+ void *entry = NULL;
+ unsigned long limit = 30, i = 0;
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ MA_STATE(mas, mt, i, i);
+
+ check_seq(mt, limit, false);
+ rcu_read_lock();
+
+ /* Check the first one and get ma_state in the correct state. */
+ MT_BUG_ON(mt, mas_walk(&mas) != xa_mk_value(i++));
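+	/* mas_next() must return each successive value, then NULL past the limit. */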
+ for ( ; i <= limit + 1; i++) {
+ entry = mas_next(&mas, limit);
+ if (i > limit)
+ MT_BUG_ON(mt, entry != NULL);
+ else
+ MT_BUG_ON(mt, xa_mk_value(i) != entry);
+ }
+ rcu_read_unlock();
+ mtree_destroy(mt);
+}
+
+static noinline void check_prev_entry(struct maple_tree *mt)
+{
+ unsigned long index = 16;
+ void *value;
+ int i;
+
+ MA_STATE(mas, mt, index, index);
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ check_seq(mt, 30, false);
+
+ rcu_read_lock();
+ value = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, value != xa_mk_value(index));
+ value = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, value != xa_mk_value(index - 1));
+ rcu_read_unlock();
+ mtree_destroy(mt);
+
+ /* Check limits on prev */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_lock(&mas);
+ for (i = 0; i <= index; i++) {
+ mas_set_range(&mas, i*10, i*10+5);
+ mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ mas_set(&mas, 20);
+ value = mas_walk(&mas);
+ MT_BUG_ON(mt, value != xa_mk_value(2));
+
+ value = mas_prev(&mas, 19);
+ MT_BUG_ON(mt, value != NULL);
+
+ mas_set(&mas, 80);
+ value = mas_walk(&mas);
+ MT_BUG_ON(mt, value != xa_mk_value(8));
+
+ value = mas_prev(&mas, 76);
+ MT_BUG_ON(mt, value != NULL);
+
+ mas_unlock(&mas);
+}
+
+static noinline void check_root_expand(struct maple_tree *mt)
+{
+ MA_STATE(mas, mt, 0, 0);
+ void *ptr;
+
+
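+	/* Walking an empty tree returns NULL covering 0..ULONG_MAX. */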
+ mas_lock(&mas);
+ mas_set(&mas, 3);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+
+ ptr = &check_prev_entry;
+ mas_set(&mas, 1);
+ mas_store_gfp(&mas, ptr, GFP_KERNEL);
+
+ mas_set(&mas, 0);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != NULL);
+
+ mas_set(&mas, 1);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != &check_prev_entry);
+
+ mas_set(&mas, 2);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != NULL);
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+
+
+ mt_init_flags(mt, 0);
+ mas_lock(&mas);
+
+ mas_set(&mas, 0);
+ ptr = &check_prev_entry;
+ mas_store_gfp(&mas, ptr, GFP_KERNEL);
+
+ mas_set(&mas, 5);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != NULL);
+ MT_BUG_ON(mt, mas.index != 1);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+
+ mas_set_range(&mas, 0, 100);
+ ptr = mas_walk(&mas);
+ MT_BUG_ON(mt, ptr != &check_prev_entry);
+ MT_BUG_ON(mt, mas.last != 0);
+ mas_unlock(&mas);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, 0);
+ mas_lock(&mas);
+
+ mas_set(&mas, 0);
+ ptr = (void *)((unsigned long) check_prev_entry | 1UL);
+ mas_store_gfp(&mas, ptr, GFP_KERNEL);
+ ptr = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, ptr != NULL);
+ MT_BUG_ON(mt, (mas.index != 1) && (mas.last != ULONG_MAX));
+
+ mas_set(&mas, 1);
+ ptr = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, (mas.index != 0) && (mas.last != 0));
+ MT_BUG_ON(mt, ptr != (void *)((unsigned long) check_prev_entry | 1UL));
+
+ mas_unlock(&mas);
+
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, 0);
+ mas_lock(&mas);
+ mas_set(&mas, 0);
+ ptr = (void *)((unsigned long) check_prev_entry | 2UL);
+ mas_store_gfp(&mas, ptr, GFP_KERNEL);
+ ptr = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, ptr != NULL);
+ MT_BUG_ON(mt, (mas.index != 1) && (mas.last != ULONG_MAX));
+
+ mas_set(&mas, 1);
+ ptr = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, (mas.index != 0) && (mas.last != 0));
+ MT_BUG_ON(mt, ptr != (void *)((unsigned long) check_prev_entry | 2UL));
+
+
+ mas_unlock(&mas);
+}
+
+static noinline void check_prealloc(struct maple_tree *mt)
+{
+ unsigned long i, max = 100;
+ unsigned long allocated;
+ unsigned char height;
+ struct maple_node *mn;
+ void *ptr = check_prealloc;
+ MA_STATE(mas, mt, 10, 20);
+
+ mt_set_non_kernel(1000);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
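+	/* Worst-case preallocation for a store is 1 node plus 3 per tree level. */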
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated == 0);
+ MT_BUG_ON(mt, allocated != 1 + height * 3);
+ mas_destroy(&mas);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated == 0);
+ MT_BUG_ON(mt, allocated != 1 + height * 3);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ mas_destroy(&mas);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+
+
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated == 0);
+ MT_BUG_ON(mt, allocated != 1 + height * 3);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
+ ma_free_rcu(mn);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ mas_destroy(&mas);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated == 0);
+ MT_BUG_ON(mt, allocated != 1 + height * 3);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ mas_destroy(&mas);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+ ma_free_rcu(mn);
+
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated == 0);
+ MT_BUG_ON(mt, allocated != 1 + height * 3);
+ mn = mas_pop_node(&mas);
+ MT_BUG_ON(mt, mas_allocated(&mas) != allocated - 1);
+ mas_push_node(&mas, mn);
+ MT_BUG_ON(mt, mas_allocated(&mas) != allocated);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ mas_destroy(&mas);
+ allocated = mas_allocated(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated == 0);
+ MT_BUG_ON(mt, allocated != 1 + height * 3);
+ mas_store_prealloc(&mas, ptr);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated == 0);
+ MT_BUG_ON(mt, allocated != 1 + height * 3);
+ mas_store_prealloc(&mas, ptr);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated == 0);
+ MT_BUG_ON(mt, allocated != 1 + height * 3);
+ mas_store_prealloc(&mas, ptr);
+
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated == 0);
+ MT_BUG_ON(mt, allocated != 1 + height * 3);
+ mas_store_prealloc(&mas, ptr);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ mt_set_non_kernel(1);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+ mas_destroy(&mas);
+
+
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL) != 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated == 0);
+ MT_BUG_ON(mt, allocated != 1 + height * 3);
+ mas_store_prealloc(&mas, ptr);
+ MT_BUG_ON(mt, mas_allocated(&mas) != 0);
+ mt_set_non_kernel(1);
+ MT_BUG_ON(mt, mas_preallocate(&mas, ptr, GFP_KERNEL & GFP_NOWAIT) == 0);
+ allocated = mas_allocated(&mas);
+ height = mas_mt_height(&mas);
+ MT_BUG_ON(mt, allocated != 0);
+}
+
+static noinline void check_spanning_write(struct maple_tree *mt)
+{
+ unsigned long i, max = 5000;
+ MA_STATE(mas, mt, 1200, 2380);
+
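+	/* Store NULL across 1200-2380, spanning nodes; the range must read back empty. */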
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 1205);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ for (i = 1; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mtree_lock(mt);
+ mas_set_range(&mas, 9, 50006); /* Will expand to 0 - ULONG_MAX */
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 1205);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* Test spanning store that requires a right cousin rebalance */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mas_set_range(&mas, 0, 12900); /* Spans more than 2 levels */
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 1205);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /* Test non-alloc tree spanning store */
+ mt_init_flags(mt, 0);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mas_set_range(&mas, 0, 300);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 15);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /* Test spanning store that requires a right sibling rebalance */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mas_set_range(&mas, 0, 12865);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 15);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /* Test spanning store that requires a left sibling rebalance */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mas_set_range(&mas, 90, 13665);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 95);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /* Test spanning store that requires a left cousin rebalance */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mas_set_range(&mas, 46805, 49995);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 46815);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /*
+ * Test spanning store that requires a left cousin rebalance all the way
+ * to root
+ */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mas_set_range(&mas, 32395, 49995);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 46815);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /*
+ * Test spanning store that requires a right cousin rebalance all the
+ * way to root
+ */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+ mas_set_range(&mas, 38875, 43190);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 38900);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /* Test spanning store ending at full node (depth 2)*/
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+ mtree_lock(mt);
+ mas_set(&mas, 47606);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ mas_set(&mas, 47607);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ mas_set(&mas, 47608);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ mas_set(&mas, 47609);
+ mas_store_gfp(&mas, check_spanning_write, GFP_KERNEL);
+ /* Ensure the parent node is full */
+ mas_ascend(&mas);
+ MT_BUG_ON(mt, (mas_data_end(&mas)) != mt_slot_count(mas.node) - 1);
+ mas_set_range(&mas, 11516, 48940);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
+ /* Test spanning write with many levels of no siblings */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+ mas_set_range(&mas, 43200, 49999);
+ mtree_lock(mt);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mas_set(&mas, 43200);
+ MT_BUG_ON(mt, mas_walk(&mas) != NULL);
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+
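+ /* Test a spanning store of NULL over most of a small tree */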
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= 100; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+
+ mtree_lock(mt);
+ mas_set_range(&mas, 76, 875);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ mtree_unlock(mt);
+}
+
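+/* Test that a stored NULL expands into the adjacent NULL ranges around it */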
+static noinline void check_null_expand(struct maple_tree *mt)
+{
+ unsigned long i, max = 100;
+ unsigned char data_end;
+ MA_STATE(mas, mt, 959, 959);
+
+ for (i = 0; i <= max; i++)
+ mtree_test_store_range(mt, i * 10, i * 10 + 5, &i);
+ /* Test expanding null at start. */
+ mas_walk(&mas);
+ data_end = mas_data_end(&mas);
+ mas_set_range(&mas, 959, 963);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, mtree_load(mt, 963) != NULL);
+ MT_BUG_ON(mt, data_end != mas_data_end(&mas));
+
+ /* Test expanding null at end. */
+ mas_set(&mas, 880);
+ mas_walk(&mas);
+ data_end = mas_data_end(&mas);
+ mas_set_range(&mas, 884, 887);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, mtree_load(mt, 884) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 889) != NULL);
+ MT_BUG_ON(mt, data_end != mas_data_end(&mas));
+
+ /* Test expanding null at start and end. */
+ mas_set(&mas, 890);
+ mas_walk(&mas);
+ data_end = mas_data_end(&mas);
+ mas_set_range(&mas, 900, 905);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, mtree_load(mt, 899) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 900) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 905) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 906) != NULL);
+ MT_BUG_ON(mt, data_end - 2 != mas_data_end(&mas));
+
+ /* Test expanding null across multiple slots. */
+ mas_set(&mas, 800);
+ mas_walk(&mas);
+ data_end = mas_data_end(&mas);
+ mas_set_range(&mas, 810, 825);
+ mas_store_gfp(&mas, NULL, GFP_KERNEL);
+ MT_BUG_ON(mt, mtree_load(mt, 809) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 810) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 825) != NULL);
+ MT_BUG_ON(mt, mtree_load(mt, 826) != NULL);
+ MT_BUG_ON(mt, data_end - 4 != mas_data_end(&mas));
+}
+
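+/*
+ * Test that erasing and overwriting entries combines the resulting gaps and
+ * that mas_empty_area_rev() can locate them.
+ */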
+static noinline void check_gap_combining(struct maple_tree *mt)
+{
+ struct maple_enode *mn1, *mn2;
+ void *entry;
+
+ unsigned long seq100[] = {
+ /* 0-5 */
+ 74, 75, 76,
+ 50, 100, 2,
+
+ /* 6-12 */
+ 44, 45, 46, 43,
+ 20, 50, 3,
+
+ /* 13-20 */
+ 80, 81, 82,
+ 76, 2, 79, 85, 4,
+ };
+ unsigned long seq2000[] = {
+ 1152, 1151,
+ 1100, 1200, 2,
+ };
+ unsigned long seq400[] = {
+ 286, 318,
+ 256, 260, 266, 270, 275, 280, 290, 398,
+ 286, 310,
+ };
+
+ unsigned long index = seq100[0];
+
+ MA_STATE(mas, mt, index, index);
+
+ MT_BUG_ON(mt, !mtree_empty(mt));
+ check_seq(mt, 100, false); /* create 100 singletons. */
+
+ mt_set_non_kernel(1);
+ mtree_test_erase(mt, seq100[2]);
+ check_load(mt, seq100[2], NULL);
+ mtree_test_erase(mt, seq100[1]);
+ check_load(mt, seq100[1], NULL);
+
+ rcu_read_lock();
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(index));
+ mn1 = mas.node;
+ mas_next(&mas, ULONG_MAX);
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(index + 4));
+ mn2 = mas.node;
+ MT_BUG_ON(mt, mn1 == mn2); /* test the test. */
+
+ /*
+ * At this point, there is a gap of 2 at index + 1 between seq100[3] and
+ * seq100[4]. Search for the gap.
+ */
+ mt_set_non_kernel(1);
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[3], seq100[4],
+ seq100[5]));
+ MT_BUG_ON(mt, mas.index != index + 1);
+ rcu_read_unlock();
+
+ mtree_test_erase(mt, seq100[6]);
+ check_load(mt, seq100[6], NULL);
+ mtree_test_erase(mt, seq100[7]);
+ check_load(mt, seq100[7], NULL);
+ mtree_test_erase(mt, seq100[8]);
+ index = seq100[9];
+
+ rcu_read_lock();
+ mas.index = index;
+ mas.last = index;
+ mas_reset(&mas);
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(index));
+ mn1 = mas.node;
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != xa_mk_value(index + 4));
+ mas_next(&mas, ULONG_MAX); /* go to the next entry. */
+ mn2 = mas.node;
+ MT_BUG_ON(mt, mn1 == mn2); /* test the next entry is in the next node. */
+
+ /*
+ * At this point, there is a gap of 3 at seq100[6]. Find it by
+ * searching 20 - 50 for size 3.
+ */
+ mas_reset(&mas);
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[10], seq100[11],
+ seq100[12]));
+ MT_BUG_ON(mt, mas.index != seq100[6]);
+ rcu_read_unlock();
+
+ mt_set_non_kernel(1);
+ mtree_store(mt, seq100[13], NULL, GFP_KERNEL);
+ check_load(mt, seq100[13], NULL);
+ check_load(mt, seq100[14], xa_mk_value(seq100[14]));
+ mtree_store(mt, seq100[14], NULL, GFP_KERNEL);
+ check_load(mt, seq100[13], NULL);
+ check_load(mt, seq100[14], NULL);
+
+ mas_reset(&mas);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[16], seq100[15],
+ seq100[17]));
+ MT_BUG_ON(mt, mas.index != seq100[13]);
+ mt_validate(mt);
+ rcu_read_unlock();
+
+ /*
+ * *DEPRECATED: no retries anymore* Test a retry entry at the start of a
+ * gap.
+ */
+ mt_set_non_kernel(2);
+ mtree_test_store_range(mt, seq100[18], seq100[14], NULL);
+ mtree_test_erase(mt, seq100[15]);
+ mas_reset(&mas);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq100[16], seq100[19],
+ seq100[20]));
+ rcu_read_unlock();
+ MT_BUG_ON(mt, mas.index != seq100[18]);
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* seq 2000 tests are for multi-level tree gaps */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_seq(mt, 2000, false);
+ mt_set_non_kernel(1);
+ mtree_test_erase(mt, seq2000[0]);
+ mtree_test_erase(mt, seq2000[1]);
+
+ mt_set_non_kernel(2);
+ mas_reset(&mas);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_empty_area_rev(&mas, seq2000[2], seq2000[3],
+ seq2000[4]));
+ MT_BUG_ON(mt, mas.index != seq2000[1]);
+ rcu_read_unlock();
+ mt_validate(mt);
+ mtree_destroy(mt);
+
+ /* seq 400 tests rebalancing over two levels. */
+ mt_set_non_kernel(99);
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_seq(mt, 400, false);
+ mtree_test_store_range(mt, seq400[0], seq400[1], NULL);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_seq(mt, 400, false);
+ mt_set_non_kernel(50);
+ mtree_test_store_range(mt, seq400[2], seq400[9],
+ xa_mk_value(seq400[2]));
+ mtree_test_store_range(mt, seq400[3], seq400[9],
+ xa_mk_value(seq400[3]));
+ mtree_test_store_range(mt, seq400[4], seq400[9],
+ xa_mk_value(seq400[4]));
+ mtree_test_store_range(mt, seq400[5], seq400[9],
+ xa_mk_value(seq400[5]));
+ mtree_test_store_range(mt, seq400[0], seq400[9],
+ xa_mk_value(seq400[0]));
+ mtree_test_store_range(mt, seq400[6], seq400[9],
+ xa_mk_value(seq400[6]));
+ mtree_test_store_range(mt, seq400[7], seq400[9],
+ xa_mk_value(seq400[7]));
+ mtree_test_store_range(mt, seq400[8], seq400[9],
+ xa_mk_value(seq400[8]));
+ mtree_test_store_range(mt, seq400[10], seq400[11],
+ xa_mk_value(seq400[10]));
+ mt_validate(mt);
+ mt_set_non_kernel(0);
+ mtree_destroy(mt);
+}
+static noinline void check_node_overwrite(struct maple_tree *mt)
+{
+ int i, max = 4000;
+
+ for (i = 0; i < max; i++)
+ mtree_test_store_range(mt, i*100, i*100 + 50, xa_mk_value(i*100));
+
+ mtree_test_store_range(mt, 319951, 367950, NULL);
+ /* mt_dump(mt); */
+ mt_validate(mt);
+}
+
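+/* Advance the maple state to the next node in a depth-first pre-order walk */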
+static void mas_dfs_preorder(struct ma_state *mas)
+{
+
+ struct maple_enode *prev;
+ unsigned char end, slot = 0;
+
+ if (mas_is_start(mas)) {
+ mas_start(mas);
+ return;
+ }
+
+ if (mte_is_leaf(mas->node) && mte_is_root(mas->node))
+ goto done;
+
+walk_up:
+ end = mas_data_end(mas);
+ if (mte_is_leaf(mas->node) ||
+ (slot > end)) {
+ if (mte_is_root(mas->node))
+ goto done;
+
+ slot = mte_parent_slot(mas->node) + 1;
+ mas_ascend(mas);
+ goto walk_up;
+ }
+
+ prev = mas->node;
+ mas->node = mas_get_slot(mas, slot);
+ if (!mas->node || slot > end) {
+ if (mte_is_root(prev))
+ goto done;
+
+ mas->node = prev;
+ slot = mte_parent_slot(mas->node) + 1;
+ mas_ascend(mas);
+ goto walk_up;
+ }
+
+ return;
+done:
+ mas->node = MAS_NONE;
+}
+
+
+static void check_dfs_preorder(struct maple_tree *mt)
+{
+ unsigned long count = 0, max = 1000;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ check_seq(mt, max, false);
+ do {
+ count++;
+ mas_dfs_preorder(&mas);
+ } while (!mas_is_none(&mas));
+ MT_BUG_ON(mt, count != 74);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_reset(&mas);
+ count = 0;
+ check_seq(mt, max, false);
+ do {
+ count++;
+ mas_dfs_preorder(&mas);
+ } while (!mas_is_none(&mas));
+ /*printk("count %lu\n", count); */
+ MT_BUG_ON(mt, count != 77);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_reset(&mas);
+ count = 0;
+ check_rev_seq(mt, max, false);
+ do {
+ count++;
+ mas_dfs_preorder(&mas);
+ } while (!mas_is_none(&mas));
+ /*printk("count %lu\n", count); */
+ MT_BUG_ON(mt, count != 77);
+ mtree_destroy(mt);
+
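+ /* Test bulk insertion using mas_expected_entries() and mas_destroy() */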
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ mas_reset(&mas);
+ mt_zero_nr_tallocated();
+ mt_set_non_kernel(200);
+ mas_expected_entries(&mas, max);
+ for (count = 0; count <= max; count++) {
+ mas.index = mas.last = count;
+ mas_store(&mas, xa_mk_value(count));
+ MT_BUG_ON(mt, mas_is_err(&mas));
+ }
+ mas_destroy(&mas);
+ rcu_barrier();
+ /*
+ * pr_info(" ->seq test of 0-%lu %luK in %d active (%d total)\n",
+ * max, mt_get_alloc_size()/1024, mt_nr_allocated(),
+ * mt_nr_tallocated());
+ */
+
+}
+
+#if defined(BENCH_SLOT_STORE)
+static noinline void bench_slot_store(struct maple_tree *mt)
+{
+ int i, brk = 105, max = 1040, brk_start = 100, count = 20000000;
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mtree_store_range(mt, brk, brk, NULL, GFP_KERNEL);
+ mtree_store_range(mt, brk_start, brk, xa_mk_value(brk),
+ GFP_KERNEL);
+ }
+}
+#endif
+
+#if defined(BENCH_NODE_STORE)
+static noinline void bench_node_store(struct maple_tree *mt)
+{
+ int i, overwrite = 76, max = 240, count = 20000000;
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mtree_store_range(mt, overwrite, overwrite + 15,
+ xa_mk_value(overwrite), GFP_KERNEL);
+
+ overwrite += 5;
+ if (overwrite >= 135)
+ overwrite = 76;
+ }
+}
+#endif
+
+#if defined(BENCH_AWALK)
+static noinline void bench_awalk(struct maple_tree *mt)
+{
+ int i, max = 2500, count = 50000000;
+ MA_STATE(mas, mt, 1470, 1470);
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ mtree_store_range(mt, 1470, 1475, NULL, GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mas_empty_area_rev(&mas, 0, 2000, 10);
+ mas_reset(&mas);
+ }
+}
+#endif
+#if defined(BENCH_WALK)
+static noinline void bench_walk(struct maple_tree *mt)
+{
+ int i, max = 2500, count = 550000000;
+ MA_STATE(mas, mt, 1470, 1470);
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ mas_walk(&mas);
+ mas_reset(&mas);
+ }
+
+}
+#endif
+
+#if defined(BENCH_MT_FOR_EACH)
+static noinline void bench_mt_for_each(struct maple_tree *mt)
+{
+ int i, count = 1000000;
+ unsigned long max = 2500, index = 0;
+ void *entry;
+
+ for (i = 0; i < max; i += 5)
+ mtree_store_range(mt, i, i + 4, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++) {
+ unsigned long j = 0;
+
+ mt_for_each(mt, entry, index, max) {
+ MT_BUG_ON(mt, entry != xa_mk_value(j));
+ j += 5;
+ }
+
+ index = 0;
+ }
+
+}
+#endif
+
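+/*
+ * Duplicate every entry into a new tree with the bulk store interface
+ * (mas_expected_entries() + mas_store()), as is done when forking.
+ */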
+static noinline void check_forking(struct maple_tree *mt)
+{
+
+ struct maple_tree newmt;
+ int i, nr_entries = 134;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, mt, 0, 0);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mt_set_non_kernel(99999);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ newmas.tree = &newmt;
+ mas_reset(&newmas);
+ mas_reset(&mas);
+ mas.index = 0;
+ mas.last = 0;
+ if (mas_expected_entries(&newmas, nr_entries)) {
+ pr_err("OOM!");
+ BUG_ON(1);
+ }
+ mas_for_each(&mas, val, ULONG_MAX) {
+ newmas.index = mas.index;
+ newmas.last = mas.last;
+ mas_store(&newmas, val);
+ }
+ mas_destroy(&newmas);
+ mt_validate(&newmt);
+ mt_set_non_kernel(0);
+ mtree_destroy(&newmt);
+}
+
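+/* Copy every entry into a new tree using mas_store_gfp() */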
+static noinline void check_mas_store_gfp(struct maple_tree *mt)
+{
+
+ struct maple_tree newmt;
+ int i, nr_entries = 135;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, mt, 0, 0);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mt_set_non_kernel(99999);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ newmas.tree = &newmt;
+ mas_reset(&newmas);
+ mas_set(&mas, 0);
+ mas_for_each(&mas, val, ULONG_MAX) {
+ newmas.index = mas.index;
+ newmas.last = mas.last;
+ mas_store_gfp(&newmas, val, GFP_KERNEL);
+ }
+
+ mt_validate(&newmt);
+ mt_set_non_kernel(0);
+ mtree_destroy(&newmt);
+}
+
+#if defined(BENCH_FORK)
+static noinline void bench_forking(struct maple_tree *mt)
+{
+
+ struct maple_tree newmt;
+ int i, nr_entries = 134, nr_fork = 80000;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, mt, 0, 0);
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < nr_fork; i++) {
+ mt_set_non_kernel(99999);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ newmas.tree = &newmt;
+ mas_reset(&newmas);
+ mas_reset(&mas);
+ mas.index = 0;
+ mas.last = 0;
+ if (mas_expected_entries(&newmas, nr_entries)) {
+ printk("OOM!");
+ BUG_ON(1);
+ }
+ mas_for_each(&mas, val, ULONG_MAX) {
+ newmas.index = mas.index;
+ newmas.last = mas.last;
+ mas_store(&newmas, val);
+ }
+ mas_destroy(&newmas);
+ mt_validate(&newmt);
+ mt_set_non_kernel(0);
+ mtree_destroy(&newmt);
+ }
+}
+#endif
+
+static noinline void next_prev_test(struct maple_tree *mt)
+{
+ int i, nr_entries = 200;
+ void *val;
+ MA_STATE(mas, mt, 0, 0);
+ struct maple_enode *mn;
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
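+ /* Iterate with mas_next(), mas_for_each(), and mas_for_each() with mas_pause() */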
+ for (i = 0; i <= nr_entries / 2; i++) {
+ mas_next(&mas, 1000);
+ if (mas_is_none(&mas))
+ break;
+
+ }
+ mas_reset(&mas);
+ mas_set(&mas, 0);
+ i = 0;
+ mas_for_each(&mas, val, 1000) {
+ i++;
+ }
+
+ mas_reset(&mas);
+ mas_set(&mas, 0);
+ i = 0;
+ mas_for_each(&mas, val, 1000) {
+ mas_pause(&mas);
+ i++;
+ }
+
+ /*
+ * 680 - 685 = xa_mk_value(68)
+ * 686 - 689 = NULL
+ * 690 - 695 = xa_mk_value(69)
+ * Check simple next/prev
+ */
+ mas_set(&mas, 686);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != NULL);
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(690 / 10));
+ MT_BUG_ON(mt, mas.index != 690);
+ MT_BUG_ON(mt, mas.last != 695);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(680 / 10));
+ MT_BUG_ON(mt, mas.index != 680);
+ MT_BUG_ON(mt, mas.last != 685);
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(690 / 10));
+ MT_BUG_ON(mt, mas.index != 690);
+ MT_BUG_ON(mt, mas.last != 695);
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(700 / 10));
+ MT_BUG_ON(mt, mas.index != 700);
+ MT_BUG_ON(mt, mas.last != 705);
+
+ /* Check across node boundaries of the tree */
+ mas_set(&mas, 70);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != xa_mk_value(70 / 10));
+ MT_BUG_ON(mt, mas.index != 70);
+ MT_BUG_ON(mt, mas.last != 75);
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(80 / 10));
+ MT_BUG_ON(mt, mas.index != 80);
+ MT_BUG_ON(mt, mas.last != 85);
+
+ val = mas_prev(&mas, 70);
+ MT_BUG_ON(mt, val != xa_mk_value(70 / 10));
+ MT_BUG_ON(mt, mas.index != 70);
+ MT_BUG_ON(mt, mas.last != 75);
+
+ /* Check across two levels of the tree */
+ mas_reset(&mas);
+ mas_set(&mas, 707);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != NULL);
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(710 / 10));
+ MT_BUG_ON(mt, mas.index != 710);
+ MT_BUG_ON(mt, mas.last != 715);
+ mn = mas.node;
+
+ val = mas_next(&mas, 1000);
+ MT_BUG_ON(mt, val != xa_mk_value(720 / 10));
+ MT_BUG_ON(mt, mas.index != 720);
+ MT_BUG_ON(mt, mas.last != 725);
+ MT_BUG_ON(mt, mn == mas.node);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(710 / 10));
+ MT_BUG_ON(mt, mas.index != 710);
+ MT_BUG_ON(mt, mas.last != 715);
+
+ /* Check running off the end and back on */
+ mas_reset(&mas);
+ mas_set(&mas, 2000);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != xa_mk_value(2000 / 10));
+ MT_BUG_ON(mt, mas.index != 2000);
+ MT_BUG_ON(mt, mas.last != 2005);
+
+ val = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, val != NULL);
+ MT_BUG_ON(mt, mas.index != ULONG_MAX);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(2000 / 10));
+ MT_BUG_ON(mt, mas.index != 2000);
+ MT_BUG_ON(mt, mas.last != 2005);
+
+ /* Check running off the start and back on */
+ mas_reset(&mas);
+ mas_set(&mas, 10);
+ val = mas_walk(&mas);
+ MT_BUG_ON(mt, val != xa_mk_value(1));
+ MT_BUG_ON(mt, mas.index != 10);
+ MT_BUG_ON(mt, mas.last != 15);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != xa_mk_value(0));
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 5);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+
+ mas.index = 0;
+ mas.last = 5;
+ mas_store(&mas, NULL);
+ mas_reset(&mas);
+ mas_set(&mas, 10);
+ mas_walk(&mas);
+
+ val = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, val != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0);
+
+ mtree_destroy(mt);
+
+ mt_init(mt);
+ mtree_store_range(mt, 0, 0, xa_mk_value(0), GFP_KERNEL);
+ mtree_store_range(mt, 5, 5, xa_mk_value(5), GFP_KERNEL);
+ mas_set(&mas, 5);
+ val = mas_prev(&mas, 4);
+ MT_BUG_ON(mt, val != NULL);
+}
+
+#define RCU_RANGE_COUNT 1000
+#define RCU_MT_BUG_ON(test, y) {if (y) { test->stop = true;} MT_BUG_ON(test->mt, y);}
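+
+/* Shared state for the multi-threaded RCU stress test */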
+struct rcu_test_struct2 {
+ struct maple_tree *mt;
+
+ bool start;
+ bool stop;
+ unsigned int thread_count;
+
+ unsigned int seen_toggle;
+ unsigned int seen_added;
+ unsigned int seen_modified;
+ unsigned int seen_deleted;
+ int pause;
+
+ unsigned long index[RCU_RANGE_COUNT];
+ unsigned long last[RCU_RANGE_COUNT];
+};
+
+struct rcu_reader_struct {
+ unsigned int id;
+ int mod;
+ int del;
+ int flip;
+ int add;
+ int next;
+ struct rcu_test_struct2 *test;
+};
+
+/* RCU reader helper function */
+static void rcu_reader_register(struct rcu_test_struct2 *test)
+{
+ rcu_register_thread();
+ uatomic_inc(&test->thread_count);
+
+ while (!test->start)
+ usleep(test->pause * 100);
+}
+
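+/* Set up the per-reader offsets used for the modify/delete/flip/add/next checks */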
+static void rcu_reader_setup(struct rcu_reader_struct *reader,
+ unsigned int id, struct rcu_test_struct2 *test)
+{
+ reader->id = id;
+ reader->test = test;
+ reader->mod = reader->id % 10;
+ reader->del = (reader->mod + 1) % 10;
+ reader->flip = (reader->mod + 2) % 10;
+ reader->add = (reader->mod + 3) % 10;
+ reader->next = (reader->mod + 4) % 10;
+}
+
+/* RCU reader in increasing index */
+static void *rcu_reader_fwd(void *ptr)
+{
+ struct rcu_reader_struct *reader = (struct rcu_reader_struct *)ptr;
+ struct rcu_test_struct2 *test = reader->test;
+ unsigned long index = reader->id;
+ bool toggled, modified, deleted, added;
+ int i;
+ void *entry, *prev = NULL;
+ MA_STATE(mas, test->mt, 0, 0);
+
+ rcu_reader_register(test);
+ toggled = modified = deleted = added = false;
+
+ while (!test->stop) {
+ i = 0;
+ /* mas_for_each? */
+ rcu_read_lock();
+ mas_set(&mas, test->index[index]);
+ mas_for_each(&mas, entry, test->last[index + 9]) {
+ unsigned long r_start, r_end, alt_start;
+ void *expected, *alt;
+
+ r_start = test->index[index + i];
+ r_end = test->last[index + i];
+ expected = xa_mk_value(r_start);
+
+ if (i == reader->del) {
+ if (!deleted) {
+ alt_start = test->index[index + reader->flip];
+ /* delete occurred. */
+ if (mas.index == alt_start) {
+ uatomic_inc(&test->seen_deleted);
+ deleted = true;
+ }
+ }
+ if (deleted) {
+ i = reader->flip;
+ r_start = test->index[index + i];
+ r_end = test->last[index + i];
+ expected = xa_mk_value(r_start);
+ }
+ }
+
+ if (!added && (i == reader->add)) {
+ alt_start = test->index[index + reader->next];
+ if (mas.index == r_start) {
+ uatomic_inc(&test->seen_added);
+ added = true;
+ } else if (mas.index == alt_start) {
+ i = reader->next;
+ r_start = test->index[index + i];
+ r_end = test->last[index + i];
+ expected = xa_mk_value(r_start);
+ }
+ }
+
+ RCU_MT_BUG_ON(test, mas.index != r_start);
+ RCU_MT_BUG_ON(test, mas.last != r_end);
+
+ if (i == reader->flip) {
+ alt = xa_mk_value(index + i + RCU_RANGE_COUNT);
+ if (prev) {
+ if (toggled && entry == expected)
+ uatomic_inc(&test->seen_toggle);
+ else if (!toggled && entry == alt)
+ uatomic_inc(&test->seen_toggle);
+ }
+
+ if (entry == expected)
+ toggled = false;
+ else if (entry == alt)
+ toggled = true;
+ else {
+ printk("!!%lu-%lu -> %p not %p or %p\n", mas.index, mas.last, entry, expected, alt);
+ RCU_MT_BUG_ON(test, 1);
+ }
+
+ prev = entry;
+ } else if (i == reader->mod) {
+ alt = xa_mk_value(index + i * 2 + 1 +
+ RCU_RANGE_COUNT);
+ if (entry != expected) {
+ if (!modified)
+ uatomic_inc(&test->seen_modified);
+ modified = true;
+ } else {
+ if (modified)
+ uatomic_inc(&test->seen_modified);
+ modified = false;
+ }
+
+ if (modified)
+ RCU_MT_BUG_ON(test, entry != alt);
+
+ } else {
+ if (entry != expected)
+ printk("!!%lu-%lu -> %p not %p\n", mas.index, mas.last, entry, expected);
+ RCU_MT_BUG_ON(test, entry != expected);
+ }
+
+ i++;
+ }
+ rcu_read_unlock();
+ usleep(test->pause);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+/* RCU reader in decreasing index */
+static void *rcu_reader_rev(void *ptr)
+{
+ struct rcu_reader_struct *reader = (struct rcu_reader_struct *)ptr;
+ struct rcu_test_struct2 *test = reader->test;
+ unsigned long index = reader->id;
+ bool toggled, modified, deleted, added;
+ int i;
+ void *prev = NULL;
+ MA_STATE(mas, test->mt, 0, 0);
+
+ rcu_reader_register(test);
+ toggled = modified = deleted = added = false;
+
+
+ while (!test->stop) {
+ void *entry;
+
+ i = 9;
+ mas_set(&mas, test->index[index + i]);
+
+ rcu_read_lock();
+ while (i--) {
+ unsigned long r_start, r_end, alt_start;
+ void *expected, *alt;
+ int line = __LINE__;
+
+ entry = mas_prev(&mas, test->index[index]);
+ r_start = test->index[index + i];
+ r_end = test->last[index + i];
+ expected = xa_mk_value(r_start);
+
+ if (i == reader->del) {
+ alt_start = test->index[index + reader->mod];
+ if (mas.index == alt_start) {
+ line = __LINE__;
+ if (!deleted)
+ uatomic_inc(&test->seen_deleted);
+ deleted = true;
+ }
+ if (deleted) {
+ line = __LINE__;
+ i = reader->mod;
+ r_start = test->index[index + i];
+ r_end = test->last[index + i];
+ expected = xa_mk_value(r_start);
+ }
+ }
+ if (!added && (i == reader->add)) {
+ alt_start = test->index[index + reader->flip];
+ if (mas.index == r_start) {
+ line = __LINE__;
+ uatomic_inc(&test->seen_added);
+ added = true;
+ } else if (mas.index == alt_start) {
+ line = __LINE__;
+ i = reader->flip;
+ r_start = test->index[index + i];
+ r_end = test->last[index + i];
+ expected = xa_mk_value(r_start);
+ }
+ }
+
+ if (i == reader->mod)
+ line = __LINE__;
+ else if (i == reader->flip)
+ line = __LINE__;
+
+ if (mas.index != r_start) {
+ alt = xa_mk_value(index + i * 2 + 1 +
+ RCU_RANGE_COUNT);
+ mt_dump(test->mt);
+ printk("Error: %lu-%lu %p != %lu-%lu %p %p line %d i %d\n",
+ mas.index, mas.last, entry,
+ r_start, r_end, expected, alt,
+ line, i);
+ }
+ RCU_MT_BUG_ON(test, mas.index != r_start);
+ RCU_MT_BUG_ON(test, mas.last != r_end);
+
+ if (i == reader->mod) {
+ alt = xa_mk_value(index + i * 2 + 1 +
+ RCU_RANGE_COUNT);
+
+ if (entry != expected) {
+ if (!modified)
+ uatomic_inc(&test->seen_modified);
+ modified = true;
+ } else {
+ if (modified)
+ uatomic_inc(&test->seen_modified);
+ modified = false;
+ }
+ if (modified)
+ RCU_MT_BUG_ON(test, entry != alt);
+
+
+ } else if (i == reader->flip) {
+ alt = xa_mk_value(index + i +
+ RCU_RANGE_COUNT);
+ if (prev) {
+ if (toggled && entry == expected)
+ uatomic_inc(&test->seen_toggle);
+ else if (!toggled && entry == alt)
+ uatomic_inc(&test->seen_toggle);
+ }
+
+ if (entry == expected)
+ toggled = false;
+ else if (entry == alt)
+ toggled = true;
+ else {
+ printk("%lu-%lu %p != %p or %p\n",
+ mas.index, mas.last, entry,
+ expected, alt);
+ RCU_MT_BUG_ON(test, 1);
+ }
+
+ prev = entry;
+ } else {
+ if (entry != expected)
+ printk("%lu-%lu %p != %p\n", mas.index,
+ mas.last, entry, expected);
+ RCU_MT_BUG_ON(test, entry != expected);
+ }
+ }
+ rcu_read_unlock();
+ usleep(test->pause);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
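+/* Stress writer: update the reader ranges, walking the readers in reverse */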
+static void rcu_stress_rev(struct maple_tree *mt, struct rcu_test_struct2 *test,
+ int count, struct rcu_reader_struct *test_reader)
+{
+ int i, j = 10000;
+ bool toggle = true;
+
+ test->start = true; /* Release the hounds! */
+ usleep(5);
+
+ while (j--) {
+ toggle = !toggle;
+ i = count;
+ while (i--) {
+ unsigned long start, end;
+ struct rcu_reader_struct *this = &test_reader[i];
+
+ /* Mod offset */
+ if (j == 600) {
+ start = test->index[this->id + this->mod];
+ end = test->last[this->id + this->mod];
+ mtree_store_range(mt, start, end,
+ xa_mk_value(this->id + this->mod * 2 +
+ 1 + RCU_RANGE_COUNT),
+ GFP_KERNEL);
+ }
+
+ /* Toggle */
+ if (!(j % 5)) {
+ start = test->index[this->id + this->flip];
+ end = test->last[this->id + this->flip];
+ mtree_store_range(mt, start, end,
+ xa_mk_value((toggle ? start :
+ this->id + this->flip +
+ RCU_RANGE_COUNT)),
+ GFP_KERNEL);
+ }
+
+ /* delete */
+ if (j == 400) {
+ start = test->index[this->id + this->del];
+ end = test->last[this->id + this->del];
+ mtree_store_range(mt, start, end, NULL, GFP_KERNEL);
+ }
+
+ /* add */
+ if (j == 500) {
+ start = test->index[this->id + this->add];
+ end = test->last[this->id + this->add];
+ mtree_store_range(mt, start, end,
+ xa_mk_value(start), GFP_KERNEL);
+ }
+ }
+ usleep(test->pause);
+ /* If a test fails, don't flood the console */
+ if (test->stop)
+ break;
+ }
+}
+
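+/* Stress writer: update the reader ranges, walking the readers forwards */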
+static void rcu_stress_fwd(struct maple_tree *mt, struct rcu_test_struct2 *test,
+ int count, struct rcu_reader_struct *test_reader)
+{
+ int j, i;
+ bool toggle = true;
+
+ test->start = true; /* Release the hounds! */
+ usleep(5);
+ for (j = 0; j < 10000; j++) {
+ toggle = !toggle;
+ for (i = 0; i < count; i++) {
+ unsigned long start, end;
+ struct rcu_reader_struct *this = &test_reader[i];
+
+ /* Mod offset */
+ if (j == 600) {
+ start = test->index[this->id + this->mod];
+ end = test->last[this->id + this->mod];
+ mtree_store_range(mt, start, end,
+ xa_mk_value(this->id + this->mod * 2 +
+ 1 + RCU_RANGE_COUNT),
+ GFP_KERNEL);
+ }
+
+ /* Toggle */
+ if (!(j % 5)) {
+ start = test->index[this->id + this->flip];
+ end = test->last[this->id + this->flip];
+ mtree_store_range(mt, start, end,
+ xa_mk_value((toggle ? start :
+ this->id + this->flip +
+ RCU_RANGE_COUNT)),
+ GFP_KERNEL);
+ }
+
+ /* delete */
+ if (j == 400) {
+ start = test->index[this->id + this->del];
+ end = test->last[this->id + this->del];
+ mtree_store_range(mt, start, end, NULL, GFP_KERNEL);
+ }
+
+ /* add */
+ if (j == 500) {
+ start = test->index[this->id + this->add];
+ end = test->last[this->id + this->add];
+ mtree_store_range(mt, start, end,
+ xa_mk_value(start), GFP_KERNEL);
+ }
+ }
+ usleep(test->pause);
+ /* If a test fails, don't flood the console */
+ if (test->stop)
+ break;
+ }
+}
+
+/*
+ * This is to check:
+ * 1. Range that is not ever present
+ * 2. Range that is always present
+ * 3. Things being added but not removed.
+ * 4. Things being removed but not added.
+ * 5. Things being added and removed; searches may succeed or fail.
+ *
+ * This sets up two readers for every 10 entries; one forward and one reverse
+ * reading.
+ */
+static void rcu_stress(struct maple_tree *mt, bool forward)
+{
+ unsigned int count, i;
+ unsigned long r, seed;
+ pthread_t readers[RCU_RANGE_COUNT / 5];
+ struct rcu_test_struct2 test;
+ struct rcu_reader_struct test_reader[RCU_RANGE_COUNT / 5];
+ void *(*function)(void *);
+
+ /* Test setup */
+ test.mt = mt;
+ test.pause = 5;
+ test.seen_toggle = 0;
+ test.seen_deleted = 0;
+ test.seen_added = 0;
+ test.seen_modified = 0;
+ test.thread_count = 0;
+ test.start = test.stop = false;
+ seed = time(NULL);
+ srand(seed);
+ for (i = 0; i < RCU_RANGE_COUNT; i++) {
+ r = seed + rand();
+ mtree_store_range(mt, seed, r,
+ xa_mk_value(seed), GFP_KERNEL);
+
+ /* Record start and end of entry */
+ test.index[i] = seed;
+ test.last[i] = r;
+ seed = 1 + r + rand() % 10;
+ }
+
+ i = count = ARRAY_SIZE(readers);
+ while (i--) {
+ unsigned long id;
+
+ id = i / 2 * 10;
+ if (i % 2)
+ function = rcu_reader_fwd;
+ else
+ function = rcu_reader_rev;
+
+ rcu_reader_setup(&test_reader[i], id, &test);
+ if (pthread_create(&readers[i], NULL, *function,
+ &test_reader[i])) {
+ perror("creating reader thread");
+ exit(1);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(readers); i++) {
+ struct rcu_reader_struct *this = &test_reader[i];
+ int add = this->id + this->add;
+
+ /* Remove the 'add' entries from the tree for later addition */
+ mtree_store_range(mt, test.index[add], test.last[add],
+ NULL, GFP_KERNEL);
+ }
+
+ mt_set_in_rcu(mt);
+ do {
+ usleep(5);
+ } while (test.thread_count < ARRAY_SIZE(readers));
+
+ if (forward)
+ rcu_stress_fwd(mt, &test, count, test_reader);
+ else
+ rcu_stress_rev(mt, &test, count, test_reader);
+
+ test.stop = true;
+ while (count--)
+ pthread_join(readers[count], NULL);
+
+ mt_validate(mt);
+}
+
+
+struct rcu_test_struct {
+ struct maple_tree *mt; /* the maple tree */
+ int count; /* Number of times to check value(s) */
+ unsigned long index; /* The first index to check */
+ void *entry1; /* The first entry value */
+ void *entry2; /* The second entry value */
+ void *entry3; /* The third entry value */
+
+ bool update_2;
+ bool update_3;
+ unsigned long range_start;
+ unsigned long range_end;
+ unsigned int loop_sleep;
+ unsigned int val_sleep;
+
+ unsigned int failed; /* failed detection for other threads */
+ unsigned int seen_entry2; /* Number of threads that have seen the second value */
+ unsigned int seen_entry3; /* Number of threads that have seen the third value */
+ unsigned int seen_both; /* Number of threads that have seen both new values */
+ unsigned int seen_toggle;
+ unsigned int seen_added;
+ unsigned int seen_removed;
+ unsigned long last; /* The end of the range to write. */
+
+ unsigned long removed; /* The index of the removed entry */
+ unsigned long added; /* The index of the added entry */
+ unsigned long toggle; /* The index of the entry to toggle */
+};
+
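+/*
+ * Return 0 if @entry matches one of the expected values, recording which
+ * updated values have been seen; return 1 for an unexpected entry.
+ */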
+static inline
+int eval_rcu_entry(struct rcu_test_struct *test, void *entry, bool *update_2,
+ bool *update_3)
+{
+ if (entry == test->entry1)
+ return 0;
+
+ if (entry == test->entry2) {
+ if (!(*update_2)) {
+ uatomic_inc(&test->seen_entry2);
+ *update_2 = true;
+ if (update_3)
+ uatomic_inc(&test->seen_both);
+ }
+ return 0;
+ }
+
+ if (entry == test->entry3) {
+ if (!(*update_3)) {
+ uatomic_inc(&test->seen_entry3);
+ *update_3 = true;
+ if (update_2)
+ uatomic_inc(&test->seen_both);
+ }
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * rcu_val() - Read a given value in the tree test->count times using the
+ * regular API
+ *
+ * @ptr: The pointer to the rcu_test_struct
+ */
+static void *rcu_val(void *ptr)
+{
+ struct rcu_test_struct *test = (struct rcu_test_struct *)ptr;
+ unsigned long count = test->count;
+ bool update_2 = false;
+ bool update_3 = false;
+ void *entry;
+
+ rcu_register_thread();
+ while (count--) {
+ usleep(test->val_sleep);
+ /*
+ * No locking required, regular API locking is handled in the
+ * maple tree code
+ */
+ entry = mtree_load(test->mt, test->index);
+ MT_BUG_ON(test->mt, eval_rcu_entry(test, entry, &update_2,
+ &update_3));
+ }
+ rcu_unregister_thread();
+ return NULL;
+}
+
+/*
+ * rcu_loop() - Loop over a section of the maple tree, checking for an expected
+ * value using the advanced API
+ *
+ * @ptr: The pointer to the rcu_test_struct
+ */
+static void *rcu_loop(void *ptr)
+{
+ struct rcu_test_struct *test = (struct rcu_test_struct *)ptr;
+ unsigned long count = test->count;
+ void *entry, *expected;
+ bool update_2 = false;
+ bool update_3 = false;
+ MA_STATE(mas, test->mt, test->range_start, test->range_start);
+
+ rcu_register_thread();
+
+ /*
+ * Loop through the test->range_start - test->range_end test->count
+ * times
+ */
+ while (count--) {
+ usleep(test->loop_sleep);
+ rcu_read_lock();
+ mas_for_each(&mas, entry, test->range_end) {
+ /* The expected value is based on the start range. */
+ expected = xa_mk_value(mas.index ? mas.index / 10 : 0);
+
+ /* Out of the interesting range */
+ if (mas.index < test->index || mas.index > test->last) {
+ if (entry != expected) {
+ printk("%lx - %lx = %p not %p\n",
+ mas.index, mas.last, entry, expected);
+ }
+ MT_BUG_ON(test->mt, entry != expected);
+ continue;
+ }
+
+ if (entry == expected)
+ continue; /* Not seen. */
+
+ /* In the interesting range */
+ MT_BUG_ON(test->mt, eval_rcu_entry(test, entry,
+ &update_2,
+ &update_3));
+ }
+ rcu_read_unlock();
+ mas_set(&mas, test->range_start);
+ }
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+static noinline
+void run_check_rcu(struct maple_tree *mt, struct rcu_test_struct *vals)
+{
+
+ int i;
+ void *(*function)(void *);
+ pthread_t readers[20];
+
+ mt_set_in_rcu(mt);
+ MT_BUG_ON(mt, !mt_in_rcu(mt));
+
+ for (i = 0; i < ARRAY_SIZE(readers); i++) {
+ if (i % 2)
+ function = rcu_loop;
+ else
+ function = rcu_val;
+
+ if (pthread_create(&readers[i], NULL, *function, vals)) {
+ perror("creating reader thread");
+ exit(1);
+ }
+ }
+
+ usleep(5); /* small yield to ensure all threads are at least started. */
+ mtree_store_range(mt, vals->index, vals->last, vals->entry2,
+ GFP_KERNEL);
+ while (i--)
+ pthread_join(readers[i], NULL);
+
+ /* Make sure the test caught at least one update. */
+ MT_BUG_ON(mt, !vals->seen_entry2);
+}
+
+static noinline
+void run_check_rcu_slowread(struct maple_tree *mt, struct rcu_test_struct *vals)
+{
+
+ int i;
+ void *(*function)(void *);
+ pthread_t readers[20];
+ unsigned int index = vals->index;
+
+ mt_set_in_rcu(mt);
+ MT_BUG_ON(mt, !mt_in_rcu(mt));
+
+ for (i = 0; i < ARRAY_SIZE(readers); i++) {
+ if (i % 2)
+ function = rcu_loop;
+ else
+ function = rcu_val;
+
+ if (pthread_create(&readers[i], NULL, *function, vals)) {
+ perror("creating reader thread");
+ exit(1);
+ }
+ }
+
+ usleep(5); /* small yield to ensure all threads are at least started. */
+
+ while (index <= vals->last) {
+ mtree_store(mt, index,
+ (index % 2 ? vals->entry2 : vals->entry3),
+ GFP_KERNEL);
+ index++;
+ usleep(5);
+ }
+
+ while (i--)
+ pthread_join(readers[i], NULL);
+
+ /* Make sure the test caught at least one update. */
+ MT_BUG_ON(mt, !vals->seen_entry2);
+ MT_BUG_ON(mt, !vals->seen_entry3);
+ MT_BUG_ON(mt, !vals->seen_both);
+}
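+
+/*
+ * Single-threaded RCU simulation: one maple state reads under rcu_read_lock()
+ * while another overwrites overlapping ranges under the tree lock.
+ */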
+static noinline void check_rcu_simulated(struct maple_tree *mt)
+{
+ unsigned long i, nr_entries = 1000;
+ unsigned long target = 4320;
+ unsigned long val = 0xDEAD;
+
+ MA_STATE(mas_writer, mt, 0, 0);
+ MA_STATE(mas_reader, mt, target, target);
+
+ rcu_register_thread();
+
+ mt_set_in_rcu(mt);
+ mas_lock(&mas_writer);
+ for (i = 0; i <= nr_entries; i++) {
+ mas_writer.index = i * 10;
+ mas_writer.last = i * 10 + 5;
+ mas_store_gfp(&mas_writer, xa_mk_value(i), GFP_KERNEL);
+ }
+ mas_unlock(&mas_writer);
+
+ /* Overwrite one entry with a new value. */
+ mas_set_range(&mas_writer, target, target + 5);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ /* Restore value. */
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ mas_reset(&mas_reader);
+
+
+ /* Overwrite 1/2 the entry */
+ mas_set_range(&mas_writer, target, target + 2);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val));
+ rcu_read_unlock();
+
+
+ /* Restore value. */
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ mas_reset(&mas_reader);
+
+ /* Overwrite last 1/2 the entry */
+ mas_set_range(&mas_writer, target + 2, target + 5);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ rcu_read_unlock();
+
+
+ /* Restore value. */
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ mas_reset(&mas_reader);
+
+ /* Overwrite more than the entry */
+ mas_set_range(&mas_writer, target - 5, target + 15);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ /* Restore value. */
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ mas_reset(&mas_reader);
+
+ /* Overwrite more than the node. */
+ mas_set_range(&mas_writer, target - 400, target + 400);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ /* Restore value. */
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ mas_reset(&mas_reader);
+
+ /* Overwrite the tree */
+ mas_set_range(&mas_writer, 0, ULONG_MAX);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ /* Clear out tree & recreate it */
+ mas_lock(&mas_writer);
+ mas_set_range(&mas_writer, 0, ULONG_MAX);
+ mas_store_gfp(&mas_writer, NULL, GFP_KERNEL);
+ mas_set_range(&mas_writer, 0, 0);
+ for (i = 0; i <= nr_entries; i++) {
+ mas_writer.index = i * 10;
+ mas_writer.last = i * 10 + 5;
+ mas_store_gfp(&mas_writer, xa_mk_value(i), GFP_KERNEL);
+ }
+ mas_unlock(&mas_writer);
+
+ /* next check */
+ /* Overwrite one entry with a new value. */
+ mas_reset(&mas_reader);
+ mas_set_range(&mas_writer, target, target + 5);
+ mas_set_range(&mas_reader, target, target);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_prev(&mas_reader, 0);
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_next(&mas_reader, ULONG_MAX) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ /* Restore value. */
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(target/10), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+
+ /* prev check */
+ /* Overwrite one entry with a new value. */
+ mas_reset(&mas_reader);
+ mas_set_range(&mas_writer, target, target + 5);
+ mas_set_range(&mas_reader, target, target);
+ rcu_read_lock();
+ MT_BUG_ON(mt, mas_walk(&mas_reader) != xa_mk_value(target/10));
+ mas_next(&mas_reader, ULONG_MAX);
+ mas_lock(&mas_writer);
+ mas_store_gfp(&mas_writer, xa_mk_value(val), GFP_KERNEL);
+ mas_unlock(&mas_writer);
+ MT_BUG_ON(mt, mas_prev(&mas_reader, 0) != xa_mk_value(val));
+ rcu_read_unlock();
+
+ rcu_unregister_thread();
+}
+
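+/* Run the threaded RCU reader tests and the RCU stress writers */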
+static noinline void check_rcu_threaded(struct maple_tree *mt)
+{
+ unsigned long i, nr_entries = 1000;
+ struct rcu_test_struct vals;
+
+ vals.val_sleep = 200;
+ vals.loop_sleep = 110;
+
+ rcu_register_thread();
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+ /* Store across several slots. */
+ vals.count = 1000;
+ vals.mt = mt;
+ vals.index = 8650;
+ vals.last = 8666;
+ vals.entry1 = xa_mk_value(865);
+ vals.entry2 = xa_mk_value(8650);
+ vals.entry3 = xa_mk_value(8650);
+ vals.range_start = 0;
+ vals.range_end = ULONG_MAX;
+ vals.seen_entry2 = 0;
+ vals.seen_entry3 = 0;
+
+ run_check_rcu(mt, &vals);
+ mtree_destroy(mt);
+
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ /* 4390-4395: value 439 (0x1b7) [0x36f] */
+ /* Store across several slots. */
+ /* Spanning store. */
+ vals.count = 10000;
+ vals.mt = mt;
+ vals.index = 4390;
+ vals.last = 4398;
+ vals.entry1 = xa_mk_value(4390);
+ vals.entry2 = xa_mk_value(439);
+ vals.entry3 = xa_mk_value(439);
+ vals.seen_entry2 = 0;
+ vals.range_start = 4316;
+ vals.range_end = 5035;
+ run_check_rcu(mt, &vals);
+ mtree_destroy(mt);
+
+
+ /* Forward writer for rcu stress */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ rcu_stress(mt, true);
+ mtree_destroy(mt);
+
+ /* Reverse writer for rcu stress */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ rcu_stress(mt, false);
+ mtree_destroy(mt);
+
+ /* Slow reader test with spanning store. */
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+ /* 4390-4395: value 439 (0x1b7) [0x36f] */
+ /* Store across several slots. */
+ /* Spanning store. */
+ vals.count = 15000;
+ vals.mt = mt;
+ vals.index = 4390;
+ vals.last = 4398;
+ vals.entry1 = xa_mk_value(4390);
+ vals.entry2 = xa_mk_value(439);
+ vals.entry3 = xa_mk_value(4391);
+ vals.seen_toggle = 0;
+ vals.seen_added = 0;
+ vals.seen_removed = 0;
+ vals.range_start = 4316;
+ vals.range_end = 5035;
+ vals.removed = 4360;
+ vals.added = 4396;
+ vals.toggle = 4347;
+ vals.val_sleep = 400;
+ vals.loop_sleep = 200;
+ vals.seen_entry2 = 0;
+ vals.seen_entry3 = 0;
+ vals.seen_both = 0;
+ vals.entry3 = xa_mk_value(438);
+
+ run_check_rcu_slowread(mt, &vals);
+ rcu_unregister_thread();
+}
+
+extern void test_kmem_cache_bulk(void);
+
+/* Test spanning writes that require balancing right sibling or right cousin */
+static noinline void check_spanning_relatives(struct maple_tree *mt)
+{
+
+ unsigned long i, nr_entries = 1000;
+
+ for (i = 0; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, i*10 + 5,
+ xa_mk_value(i), GFP_KERNEL);
+
+
+ mtree_store_range(mt, 9365, 9955, NULL, GFP_KERNEL);
+}
+
+static noinline void check_fuzzer(struct maple_tree *mt)
+{
+ /*
+ * 1. Causes a spanning rebalance of a single root node.
+ * Fixed by setting the correct limit in mast_cp_to_nodes() when the
+ * entire right side is consumed.
+ */
+ mtree_test_insert(mt, 88, (void *)0xb1);
+ mtree_test_insert(mt, 84, (void *)0xa9);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 14, (void *)0x1d);
+ mtree_test_insert(mt, 7, (void *)0xf);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_insert(mt, 18, (void *)0x25);
+ mtree_test_store_range(mt, 8, 18, (void *)0x11);
+ mtree_destroy(mt);
+
+
+ /*
+ * 2. Cause a spanning rebalance of two nodes in root.
+ * Fixed by setting mast->r->max correctly.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_store(mt, 87, (void *)0xaf);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_load(mt, 4);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_store(mt, 8, (void *)0x11);
+ mtree_test_store(mt, 44, (void *)0x59);
+ mtree_test_store(mt, 68, (void *)0x89);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 43, (void *)0x57);
+ mtree_test_insert(mt, 24, (void *)0x31);
+ mtree_test_insert(mt, 844, (void *)0x699);
+ mtree_test_store(mt, 84, (void *)0xa9);
+ mtree_test_store(mt, 4, (void *)0x9);
+ mtree_test_erase(mt, 4);
+ mtree_test_load(mt, 5);
+ mtree_test_erase(mt, 0);
+ mtree_destroy(mt);
+
+ /*
+ * 3. Cause a node overflow on copy.
+ * Fixed by using the correct check for node size in mas_wr_modify().
+ * Also discovered an issue with metadata setting.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_store_range(mt, 0, 18446744073709551615UL, (void *)0x1);
+ mtree_test_store(mt, 4, (void *)0x9);
+ mtree_test_erase(mt, 5);
+ mtree_test_erase(mt, 0);
+ mtree_test_erase(mt, 4);
+ mtree_test_store(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 5);
+ mtree_test_store(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 5);
+ mtree_test_erase(mt, 4);
+ mtree_test_store(mt, 4, (void *)0x9);
+ mtree_test_store(mt, 444, (void *)0x379);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_load(mt, 0);
+ mtree_test_store(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 0);
+ mtree_destroy(mt);
+
+ /*
+ * 4. A spanning store failure due to writing an incorrect pivot value at
+ * the last slot.
+ * Fixed by setting mast->r->max correctly in mast_cp_to_nodes()
+ *
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_insert(mt, 261, (void *)0x20b);
+ mtree_test_store(mt, 516, (void *)0x409);
+ mtree_test_store(mt, 6, (void *)0xd);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_insert(mt, 1256, (void *)0x9d1);
+ mtree_test_store(mt, 4, (void *)0x9);
+ mtree_test_erase(mt, 1);
+ mtree_test_store(mt, 56, (void *)0x71);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store(mt, 24, (void *)0x31);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 2263, (void *)0x11af);
+ mtree_test_insert(mt, 446, (void *)0x37d);
+ mtree_test_store_range(mt, 6, 45, (void *)0xd);
+ mtree_test_store_range(mt, 3, 446, (void *)0x7);
+ mtree_destroy(mt);
+
+ /*
+ * 5. mas_wr_extend_null() may overflow slots.
+ * Fix by checking against wr_mas->node_end.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_store(mt, 48, (void *)0x61);
+ mtree_test_store(mt, 3, (void *)0x7);
+ mtree_test_load(mt, 0);
+ mtree_test_store(mt, 88, (void *)0xb1);
+ mtree_test_store(mt, 81, (void *)0xa3);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 2480, (void *)0x1361);
+ mtree_test_insert(mt, 18446744073709551615UL,
+ (void *)0xffffffffffffffff);
+ mtree_test_erase(mt, 18446744073709551615UL);
+ mtree_destroy(mt);
+
+ /*
+ * 6. When reusing a node with an implied pivot and the node is
+ * shrinking, old data would be left in the implied slot.
+ * Fixed by checking the last pivot for the mas->max and clearing
+ * accordingly. This only affected the left-most node as that node is
+ * the only one allowed to end in NULL.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_erase(mt, 3);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_load(mt, 2);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 3);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_load(mt, 2);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 3);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_erase(mt, 1);
+ mtree_test_store_range(mt, 4, 62, (void *)0x9);
+ mtree_test_erase(mt, 62);
+ mtree_test_store_range(mt, 1, 0, (void *)0x3);
+ mtree_test_insert(mt, 11, (void *)0x17);
+ mtree_test_insert(mt, 3, (void *)0x7);
+ mtree_test_insert(mt, 3, (void *)0x7);
+ mtree_test_store(mt, 62, (void *)0x7d);
+ mtree_test_erase(mt, 62);
+ mtree_test_store_range(mt, 1, 15, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 3, (void *)0x7);
+ mtree_test_store(mt, 62, (void *)0x7d);
+ mtree_test_erase(mt, 62);
+ mtree_test_insert(mt, 122, (void *)0xf5);
+ mtree_test_store(mt, 3, (void *)0x7);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_store_range(mt, 0, 1, (void *)0x1);
+ mtree_test_insert(mt, 85, (void *)0xab);
+ mtree_test_insert(mt, 72, (void *)0x91);
+ mtree_test_insert(mt, 81, (void *)0xa3);
+ mtree_test_insert(mt, 726, (void *)0x5ad);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store(mt, 51, (void *)0x67);
+ mtree_test_insert(mt, 611, (void *)0x4c7);
+ mtree_test_insert(mt, 485, (void *)0x3cb);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 26, 1, (void *)0x35);
+ mtree_test_load(mt, 1);
+ mtree_test_store_range(mt, 1, 22, (void *)0x3);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_load(mt, 53);
+ mtree_test_load(mt, 1);
+ mtree_test_store_range(mt, 1, 1, (void *)0x3);
+ mtree_test_insert(mt, 222, (void *)0x1bd);
+ mtree_test_insert(mt, 485, (void *)0x3cb);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_load(mt, 0);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_insert(mt, 3, (void *)0x7);
+ mtree_test_store(mt, 621, (void *)0x4db);
+ mtree_test_insert(mt, 0, (void *)0x1);
+ mtree_test_erase(mt, 5);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store(mt, 62, (void *)0x7d);
+ mtree_test_erase(mt, 62);
+ mtree_test_store_range(mt, 1, 0, (void *)0x3);
+ mtree_test_insert(mt, 22, (void *)0x2d);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_store_range(mt, 4, 62, (void *)0x9);
+ mtree_test_erase(mt, 62);
+ mtree_test_erase(mt, 1);
+ mtree_test_load(mt, 1);
+ mtree_test_store_range(mt, 1, 22, (void *)0x3);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_load(mt, 53);
+ mtree_test_load(mt, 1);
+ mtree_test_store_range(mt, 1, 1, (void *)0x3);
+ mtree_test_insert(mt, 222, (void *)0x1bd);
+ mtree_test_insert(mt, 485, (void *)0x3cb);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_load(mt, 0);
+ mtree_test_load(mt, 0);
+ mtree_destroy(mt);
+
+ /*
+ * 7. The previous fix was incomplete; fix mas_reuse_node() clearing of
+ * old data by overwriting it first - that way metadata is of no concern.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_load(mt, 1);
+ mtree_test_insert(mt, 102, (void *)0xcd);
+ mtree_test_erase(mt, 2);
+ mtree_test_erase(mt, 0);
+ mtree_test_load(mt, 0);
+ mtree_test_insert(mt, 4, (void *)0x9);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 110, (void *)0xdd);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 5, 0, (void *)0xb);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_store(mt, 112, (void *)0xe1);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 110, 2, (void *)0xdd);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_load(mt, 22);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 210, (void *)0x1a5);
+ mtree_test_store_range(mt, 0, 2, (void *)0x1);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_erase(mt, 2);
+ mtree_test_erase(mt, 22);
+ mtree_test_erase(mt, 1);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_load(mt, 112);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 1, 2, (void *)0x3);
+ mtree_test_erase(mt, 0);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_erase(mt, 0);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_erase(mt, 2);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert_range(mt, 1, 2, (void *)0x3);
+ mtree_test_erase(mt, 0);
+ mtree_test_erase(mt, 2);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_load(mt, 112);
+ mtree_test_store_range(mt, 110, 12, (void *)0xdd);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_load(mt, 110);
+ mtree_test_insert_range(mt, 4, 71, (void *)0x9);
+ mtree_test_load(mt, 2);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_insert_range(mt, 11, 22, (void *)0x17);
+ mtree_test_erase(mt, 12);
+ mtree_test_store(mt, 2, (void *)0x5);
+ mtree_test_load(mt, 22);
+ mtree_destroy(mt);
+
+
+ /*
+ * 8. When rebalancing or spanning_rebalance(), the max of the new node
+ * may be set incorrectly to the final pivot and not the right max.
+ * Fix by setting the left max to orig right max if the entire node is
+ * consumed.
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_store(mt, 6, (void *)0xd);
+ mtree_test_store(mt, 67, (void *)0x87);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_insert(mt, 6716, (void *)0x3479);
+ mtree_test_store(mt, 61, (void *)0x7b);
+ mtree_test_insert(mt, 13, (void *)0x1b);
+ mtree_test_store(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_load(mt, 0);
+ mtree_test_erase(mt, 67167);
+ mtree_test_insert_range(mt, 6, 7167, (void *)0xd);
+ mtree_test_insert(mt, 6, (void *)0xd);
+ mtree_test_erase(mt, 67);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 667167);
+ mtree_test_insert(mt, 6, (void *)0xd);
+ mtree_test_store(mt, 67, (void *)0x87);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_erase(mt, 1);
+ mtree_test_insert(mt, 6, (void *)0xd);
+ mtree_test_erase(mt, 67);
+ mtree_test_insert(mt, 15, (void *)0x1f);
+ mtree_test_insert(mt, 67167, (void *)0x20cbf);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_load(mt, 7);
+ mtree_test_insert(mt, 16, (void *)0x21);
+ mtree_test_insert(mt, 36, (void *)0x49);
+ mtree_test_store(mt, 67, (void *)0x87);
+ mtree_test_store(mt, 6, (void *)0xd);
+ mtree_test_insert(mt, 367, (void *)0x2df);
+ mtree_test_insert(mt, 115, (void *)0xe7);
+ mtree_test_store(mt, 0, (void *)0x1);
+ mtree_test_store_range(mt, 1, 3, (void *)0x3);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 67167);
+ mtree_test_insert_range(mt, 6, 47, (void *)0xd);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert_range(mt, 1, 67, (void *)0x3);
+ mtree_test_load(mt, 67);
+ mtree_test_insert(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 67167);
+ mtree_destroy(mt);
+
+ /*
+ * 9. A spanning store to the end of data caused an invalid metadata
+ * length which eventually resulted in a crash.
+ * Fix by checking if there is a value in pivot before incrementing the
+ * metadata end in mab_mas_cp(). To ensure this doesn't happen again,
+ * abstract the two locations this happens into a function called
+ * mas_leaf_set_meta().
+ */
+ mt_init_flags(mt, 0);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_insert(mt, 12, (void *)0x19);
+ mtree_test_insert(mt, 6, (void *)0xd);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 91, (void *)0xb7);
+ mtree_test_insert(mt, 18, (void *)0x25);
+ mtree_test_insert(mt, 81, (void *)0xa3);
+ mtree_test_store_range(mt, 0, 128, (void *)0x1);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_erase(mt, 8);
+ mtree_test_insert(mt, 11, (void *)0x17);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 18446744073709551605UL, (void *)0xffffffffffffffeb);
+ mtree_test_erase(mt, 18446744073709551605UL);
+ mtree_test_store_range(mt, 0, 281, (void *)0x1);
+ mtree_test_erase(mt, 2);
+ mtree_test_insert(mt, 1211, (void *)0x977);
+ mtree_test_insert(mt, 111, (void *)0xdf);
+ mtree_test_insert(mt, 13, (void *)0x1b);
+ mtree_test_insert(mt, 211, (void *)0x1a7);
+ mtree_test_insert(mt, 11, (void *)0x17);
+ mtree_test_insert(mt, 5, (void *)0xb);
+ mtree_test_insert(mt, 1218, (void *)0x985);
+ mtree_test_insert(mt, 61, (void *)0x7b);
+ mtree_test_store(mt, 1, (void *)0x3);
+ mtree_test_insert(mt, 121, (void *)0xf3);
+ mtree_test_insert(mt, 8, (void *)0x11);
+ mtree_test_insert(mt, 21, (void *)0x2b);
+ mtree_test_insert(mt, 2, (void *)0x5);
+ mtree_test_insert(mt, 18446744073709551605UL, (void *)0xffffffffffffffeb);
+ mtree_test_erase(mt, 18446744073709551605UL);
+}
+static noinline void check_dup_gaps(struct maple_tree *mt,
+ unsigned long nr_entries, bool zero_start,
+ unsigned long gap)
+{
+ unsigned long i = 0;
+ struct maple_tree newmt;
+ int ret;
+ void *tmp;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
+
+
+ if (!zero_start)
+ i = 1;
+
+ mt_zero_nr_tallocated();
+ for (; i <= nr_entries; i++)
+ mtree_store_range(mt, i*10, (i+1)*10 - gap,
+ xa_mk_value(i), GFP_KERNEL);
+
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ mt_set_non_kernel(99999);
+ ret = mas_expected_entries(&newmas, nr_entries);
+ mt_set_non_kernel(0);
+ MT_BUG_ON(mt, ret != 0);
+
+ mas_for_each(&mas, tmp, ULONG_MAX) {
+ newmas.index = mas.index;
+ newmas.last = mas.last;
+ mas_store(&newmas, tmp);
+ }
+
+ mas_destroy(&mas);
+ mas_destroy(&newmas);
+ mtree_destroy(&newmt);
+}
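check_dup_gaps() above exercises the maple tree bulk-store path: announce the
expected number of entries with mas_expected_entries(), copy each range with
mas_for_each() and mas_store(), then drop any unused preallocations with
mas_destroy(). A minimal sketch of the same pattern outside the test harness,
with locking and error handling omitted and old/new as hypothetical trees:

	MA_STATE(old_mas, old, 0, 0);
	MA_STATE(new_mas, new, 0, 0);
	void *entry;

	if (mas_expected_entries(&new_mas, nr_entries))
		return -ENOMEM;			/* preallocate the nodes up front */

	mas_for_each(&old_mas, entry, ULONG_MAX) {
		new_mas.index = old_mas.index;	/* copy the range boundaries */
		new_mas.last = old_mas.last;
		mas_store(&new_mas, entry);	/* store into the preallocated nodes */
	}
	mas_destroy(&new_mas);			/* free whatever was not used */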
+
+static noinline void check_dup(struct maple_tree *mt)
+{
+ int i;
+
+ /* Check with a value at zero */
+ for (i = 10; i < 1000; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, true, 5);
+ mtree_destroy(mt);
+ }
+
+ /* Check with a value at zero, no gap */
+ for (i = 1000; i < 2000; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, true, 0);
+ mtree_destroy(mt);
+ }
+
+ /* Check with a value at zero and unreasonably large */
+ for (i = 100010; i < 100020; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, true, 5);
+ mtree_destroy(mt);
+ }
+
+ /* Small to medium size not starting at zero */
+ for (i = 200; i < 1000; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, false, 5);
+ mtree_destroy(mt);
+ }
+
+ /* Unreasonably large not starting at zero */
+ for (i = 100010; i < 100020; i++) {
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ check_dup_gaps(mt, i, false, 5);
+ mtree_destroy(mt);
+ }
+
+ /* Check non-allocation tree not starting at zero */
+ for (i = 1500; i < 3000; i++) {
+ mt_init_flags(mt, 0);
+ check_dup_gaps(mt, i, false, 5);
+ mtree_destroy(mt);
+ }
+
+ /* Check non-allocation tree starting at zero */
+ for (i = 200; i < 1000; i++) {
+ mt_init_flags(mt, 0);
+ check_dup_gaps(mt, i, true, 5);
+ mtree_destroy(mt);
+ }
+
+ /* Unreasonably large */
+ for (i = 100015; i < 100020; i++) {
+ mt_init_flags(mt, 0);
+ check_dup_gaps(mt, i, true, 5);
+ mtree_destroy(mt);
+ }
+
+}
+
+static DEFINE_MTREE(tree);
+static int maple_tree_seed(void)
+{
+ unsigned long set[] = {5015, 5014, 5017, 25, 1000,
+ 1001, 1002, 1003, 1005, 0,
+ 5003, 5002};
+ void *ptr = &set;
+
+ pr_info("\nTEST STARTING\n\n");
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_root_expand(&tree);
+ mtree_destroy(&tree);
+
+#if defined(BENCH_SLOT_STORE)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_slot_store(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_NODE_STORE)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_node_store(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_AWALK)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_awalk(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_WALK)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_walk(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_FORK)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_forking(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+#if defined(BENCH_MT_FOR_EACH)
+#define BENCH
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ bench_mt_for_each(&tree);
+ mtree_destroy(&tree);
+ goto skip;
+#endif
+
+ test_kmem_cache_bulk();
+
+ mt_init_flags(&tree, 0);
+ check_new_node(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_prealloc(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_spanning_write(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_null_expand(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_dfs_preorder(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_forking(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_mas_store_gfp(&tree);
+ mtree_destroy(&tree);
+
+ /* Test ranges (store and insert) */
+ mt_init_flags(&tree, 0);
+ check_ranges(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_alloc_range(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_alloc_rev_range(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+
+ check_load(&tree, set[0], NULL); /* See if 5015 -> NULL */
+
+ check_insert(&tree, set[9], &tree); /* Insert 0 */
+ check_load(&tree, set[9], &tree); /* See if 0 -> &tree */
+ check_load(&tree, set[0], NULL); /* See if 5015 -> NULL */
+
+ check_insert(&tree, set[10], ptr); /* Insert 5003 */
+ check_load(&tree, set[9], &tree); /* See if 0 -> &tree */
+ check_load(&tree, set[11], NULL); /* See if 5002 -> NULL */
+ check_load(&tree, set[10], ptr); /* See if 5003 -> ptr */
+
+ /* Clear out the tree */
+ mtree_destroy(&tree);
+
+ /* Try to insert, insert a dup, and load back what was inserted. */
+ mt_init_flags(&tree, 0);
+ check_insert(&tree, set[0], &tree); /* Insert 5015 */
+ check_dup_insert(&tree, set[0], &tree); /* Insert 5015 again */
+ check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */
+
+ /*
+ * Second set of tests try to load a value that doesn't exist, inserts
+ * a second value, then loads the value again
+ */
+ check_load(&tree, set[1], NULL); /* See if 5014 -> NULL */
+ check_insert(&tree, set[1], ptr); /* insert 5014 -> ptr */
+ check_load(&tree, set[1], ptr); /* See if 5014 -> ptr */
+ check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */
+ /*
+ * Tree currently contains:
+ * p[0]: 14 -> (nil) p[1]: 15 -> ptr p[2]: 16 -> &tree p[3]: 0 -> (nil)
+ */
+ check_insert(&tree, set[6], ptr); /* insert 1002 -> ptr */
+ check_insert(&tree, set[7], &tree); /* insert 1003 -> &tree */
+
+ check_load(&tree, set[0], &tree); /* See if 5015 -> &tree */
+ check_load(&tree, set[1], ptr); /* See if 5014 -> ptr */
+ check_load(&tree, set[6], ptr); /* See if 1002 -> ptr */
+ check_load(&tree, set[7], &tree); /* 1003 = &tree ? */
+
+ /* Clear out tree */
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ /* Test inserting into a NULL hole. */
+ check_insert(&tree, set[5], ptr); /* insert 1001 -> ptr */
+ check_insert(&tree, set[7], &tree); /* insert 1003 -> &tree */
+ check_insert(&tree, set[6], ptr); /* insert 1002 -> ptr */
+ check_load(&tree, set[5], ptr); /* See if 1001 -> ptr */
+ check_load(&tree, set[6], ptr); /* See if 1002 -> ptr */
+ check_load(&tree, set[7], &tree); /* See if 1003 -> &tree */
+
+ /* Clear out the tree */
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_erase_testset(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ /*
+ * set[] = {5015, 5014, 5017, 25, 1000,
+ * 1001, 1002, 1003, 1005, 0,
+ * 5003, 5002};
+ */
+
+ check_insert(&tree, set[0], ptr); /* 5015 */
+ check_insert(&tree, set[1], &tree); /* 5014 */
+ check_insert(&tree, set[2], ptr); /* 5017 */
+ check_insert(&tree, set[3], &tree); /* 25 */
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_insert(&tree, set[4], ptr); /* 1000 < Should split. */
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree); /*25 */
+ check_load(&tree, set[4], ptr);
+ check_insert(&tree, set[5], &tree); /* 1001 */
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_load(&tree, set[4], ptr);
+ check_load(&tree, set[5], &tree);
+ check_insert(&tree, set[6], ptr);
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_load(&tree, set[4], ptr);
+ check_load(&tree, set[5], &tree);
+ check_load(&tree, set[6], ptr);
+ check_insert(&tree, set[7], &tree);
+ check_load(&tree, set[0], ptr);
+ check_insert(&tree, set[8], ptr);
+
+ check_insert(&tree, set[9], &tree);
+
+ check_load(&tree, set[0], ptr);
+ check_load(&tree, set[1], &tree);
+ check_load(&tree, set[2], ptr);
+ check_load(&tree, set[3], &tree);
+ check_load(&tree, set[4], ptr);
+ check_load(&tree, set[5], &tree);
+ check_load(&tree, set[6], ptr);
+ check_load(&tree, set[9], &tree);
+ mtree_destroy(&tree);
+
+ check_nomem(&tree);
+ mt_init_flags(&tree, 0);
+ check_seq(&tree, 16, false);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_seq(&tree, 1000, true);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_rev_seq(&tree, 1000, true);
+ mtree_destroy(&tree);
+
+ check_lower_bound_split(&tree);
+ check_upper_bound_split(&tree);
+ check_mid_split(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_next_entry(&tree);
+ check_find(&tree);
+ check_find_2(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_prev_entry(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_erase2_sets(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_gap_combining(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_node_overwrite(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ next_prev_test(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_rcu_simulated(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_rcu_threaded(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_spanning_relatives(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_rev_find(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, 0);
+ check_fuzzer(&tree);
+ mtree_destroy(&tree);
+
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ check_dup(&tree);
+ mtree_destroy(&tree);
+
+#if defined(BENCH)
+skip:
+#endif
+ rcu_barrier();
+ pr_info("maple_tree: %u of %u tests passed\n",
+ atomic_read(&maple_tree_tests_passed),
+ atomic_read(&maple_tree_tests_run));
+ if (atomic_read(&maple_tree_tests_run) ==
+ atomic_read(&maple_tree_tests_passed))
+ return 0;
+
+ return -EINVAL;
+}
+
+static void maple_tree_harvest(void)
+{
+
+}
+
+module_init(maple_tree_seed);
+module_exit(maple_tree_harvest);
+MODULE_AUTHOR("Liam R. Howlett <Liam.Howlett@Oracle.com>");
+MODULE_LICENSE("GPL");
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 7413dd300516..1505a52f23a0 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -12,8 +12,9 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
unsigned long res = n;
might_fault();
if (!should_fail_usercopy() && likely(access_ok(from, n))) {
- instrument_copy_from_user(to, from, n);
+ instrument_copy_from_user_before(to, from, n);
res = raw_copy_from_user(to, from, n);
+ instrument_copy_from_user_after(to, from, n, res);
}
if (unlikely(res))
memset(to + (n - res), 0, res);
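The old single instrument_copy_from_user() hook is split into a pair so that
KMSAN can learn how many bytes were actually copied: the before hook performs
the pre-copy checks, and the after hook is handed the number of bytes left
uncopied by raw_copy_from_user(). A sketch of what such paired hooks can look
like (the authoritative definitions live in include/linux/instrumented.h; the
bodies below are illustrative):

	static inline void
	instrument_copy_from_user_before(const void *to, const void __user *from,
					 unsigned long n)
	{
		kasan_check_write(to, n);	/* the destination buffer must be writable */
	}

	static inline void
	instrument_copy_from_user_after(const void *to, const void __user *from,
					unsigned long n, unsigned long left)
	{
		/* only the bytes that were actually copied are initialized now */
		kmsan_unpoison_memory(to, n - left);
	}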
diff --git a/mm/Kconfig b/mm/Kconfig
index 3897e924e40f..57e1d8c5b505 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -23,7 +23,7 @@ menuconfig SWAP
in your computer. If unsure say Y.
config ZSWAP
- bool "Compressed cache for swap pages (EXPERIMENTAL)"
+ bool "Compressed cache for swap pages"
depends on SWAP
select FRONTSWAP
select CRYPTO
@@ -36,12 +36,6 @@ config ZSWAP
in the case where decompressing from RAM is faster than swap device
reads, can also improve workload performance.
- This is marked experimental because it is a new feature (as of
- v3.11) that interacts heavily with memory reclaim. While these
- interactions don't cause any known issues on simple memory setups,
- they have not be fully explored on the large set of potential
- configurations and workloads that exist.
-
config ZSWAP_DEFAULT_ON
bool "Enable the compressed cache for swap pages by default"
depends on ZSWAP
@@ -1130,6 +1124,32 @@ config PTE_MARKER_UFFD_WP
purposes. It is required to enable userfaultfd write protection on
file-backed memory types like shmem and hugetlbfs.
+# multi-gen LRU {
+config LRU_GEN
+ bool "Multi-Gen LRU"
+ depends on MMU
+ # make sure folio->flags has enough spare bits
+ depends on 64BIT || !SPARSEMEM || SPARSEMEM_VMEMMAP
+ help
+ A high performance LRU implementation to overcommit memory. See
+ Documentation/admin-guide/mm/multigen_lru.rst for details.
+
+config LRU_GEN_ENABLED
+ bool "Enable by default"
+ depends on LRU_GEN
+ help
+ This option enables the multi-gen LRU by default.
+
+config LRU_GEN_STATS
+ bool "Full stats for debugging"
+ depends on LRU_GEN
+ help
+ Do not enable this option unless you plan to look at historical stats
+ from evicted generations for debugging purposes.
+
+ This option has a per-memcg and per-node memory overhead.
+# }
+
source "mm/damon/Kconfig"
endmenu
diff --git a/mm/Makefile b/mm/Makefile
index 9a564f836403..8e105e5b3e29 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -52,7 +52,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
util.o mmzone.o vmstat.o backing-dev.o \
mm_init.o percpu.o slab_common.o \
- compaction.o vmacache.o \
+ compaction.o \
interval_tree.o list_lru.o workingset.o \
debug.o gup.o mmap_lock.o $(mmu-y)
@@ -89,14 +89,18 @@ obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
obj-$(CONFIG_KASAN) += kasan/
obj-$(CONFIG_KFENCE) += kfence/
+obj-$(CONFIG_KMSAN) += kmsan/
obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMTEST) += memtest.o
obj-$(CONFIG_MIGRATION) += migrate.o
+obj-$(CONFIG_NUMA) += memory-tiers.o
obj-$(CONFIG_DEVICE_MIGRATION) += migrate_device.o
obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o khugepaged.o
obj-$(CONFIG_PAGE_COUNTER) += page_counter.o
obj-$(CONFIG_MEMCG) += memcontrol.o vmpressure.o
-obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o
+ifdef CONFIG_SWAP
+obj-$(CONFIG_MEMCG) += swap_cgroup.o
+endif
obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o
obj-$(CONFIG_GUP_TEST) += gup_test.o
obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index de65cb1e5f76..c30419a5e119 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -776,8 +776,6 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
int bdi_init(struct backing_dev_info *bdi)
{
- int ret;
-
bdi->dev = NULL;
kref_init(&bdi->refcnt);
@@ -788,9 +786,7 @@ int bdi_init(struct backing_dev_info *bdi)
INIT_LIST_HEAD(&bdi->wb_list);
init_waitqueue_head(&bdi->wb_waitq);
- ret = cgwb_bdi_init(bdi);
-
- return ret;
+ return cgwb_bdi_init(bdi);
}
struct backing_dev_info *bdi_alloc(int node_id)
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index c3ffe253e055..602fff89b15f 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -163,11 +163,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(cma_alloc_fops, NULL, cma_alloc_write, "%llu\n");
static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
{
struct dentry *tmp;
- char name[CMA_MAX_NAME];
- scnprintf(name, sizeof(name), "cma-%s", cma->name);
-
- tmp = debugfs_create_dir(name, root_dentry);
+ tmp = debugfs_create_dir(cma->name, root_dentry);
debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops);
debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops);
diff --git a/mm/compaction.c b/mm/compaction.c
index 10561cb1aaad..2dd02c4683c4 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -52,8 +52,6 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
-#define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order)
-#define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order)
/*
* Page order with-respect-to which proactive compaction
@@ -404,7 +402,7 @@ static bool test_and_set_skip(struct compact_control *cc, struct page *page,
if (cc->ignore_skip_hint)
return false;
- if (!IS_ALIGNED(pfn, pageblock_nr_pages))
+ if (!pageblock_aligned(pfn))
return false;
skip = get_pageblock_skip(page);
@@ -886,7 +884,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* COMPACT_CLUSTER_MAX at a time so the second call must
* not falsely conclude that the block should be skipped.
*/
- if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
+ if (!valid_page && pageblock_aligned(low_pfn)) {
if (!isolation_suitable(cc, page)) {
low_pfn = end_pfn;
page = NULL;
@@ -1935,7 +1933,7 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
* before making it "skip" so other compaction instances do
* not scan the same block.
*/
- if (IS_ALIGNED(low_pfn, pageblock_nr_pages) &&
+ if (pageblock_aligned(low_pfn) &&
!fast_find_block && !isolation_suitable(cc, page))
continue;
@@ -1977,9 +1975,21 @@ static inline bool is_via_compact_memory(int order)
return order == -1;
}
+/*
+ * Determine whether kswapd is (or recently was!) running on this node.
+ *
+ * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't
+ * zero it.
+ */
static bool kswapd_is_running(pg_data_t *pgdat)
{
- return pgdat->kswapd && task_is_running(pgdat->kswapd);
+ bool running;
+
+ pgdat_kswapd_lock(pgdat);
+ running = pgdat->kswapd && task_is_running(pgdat->kswapd);
+ pgdat_kswapd_unlock(pgdat);
+
+ return running;
}
/*
@@ -2109,7 +2119,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
* migration source is unmovable/reclaimable but it's not worth
* special casing.
*/
- if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
+ if (!pageblock_aligned(cc->migrate_pfn))
return COMPACT_CONTINUE;
/* Direct compactor: Is a suitable page free? */
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index 66265e3a9c65..7821fcb3f258 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -68,6 +68,9 @@ config DAMON_DBGFS
If unsure, say N.
+ This will be removed after >5.15.y LTS kernel is released, so users
+ should move to the sysfs interface (DAMON_SYSFS).
+
config DAMON_DBGFS_KUNIT_TEST
bool "Test for damon debugfs interface" if !KUNIT_ALL_TESTS
depends on DAMON_DBGFS && KUNIT=y
diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h
index 573669566f84..3db9b7368756 100644
--- a/mm/damon/core-test.h
+++ b/mm/damon/core-test.h
@@ -126,7 +126,7 @@ static void damon_test_split_at(struct kunit *test)
t = damon_new_target();
r = damon_new_region(0, 100);
damon_add_region(r, t);
- damon_split_region_at(c, t, r, 25);
+ damon_split_region_at(t, r, 25);
KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);
@@ -219,14 +219,14 @@ static void damon_test_split_regions_of(struct kunit *test)
t = damon_new_target();
r = damon_new_region(0, 22);
damon_add_region(r, t);
- damon_split_regions_of(c, t, 2);
+ damon_split_regions_of(t, 2);
KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
damon_free_target(t);
t = damon_new_target();
r = damon_new_region(0, 220);
damon_add_region(r, t);
- damon_split_regions_of(c, t, 4);
+ damon_split_regions_of(t, 4);
KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
damon_free_target(t);
damon_destroy_ctx(c);
@@ -267,6 +267,28 @@ static void damon_test_ops_registration(struct kunit *test)
KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
}
+static void damon_test_set_regions(struct kunit *test)
+{
+ struct damon_target *t = damon_new_target();
+ struct damon_region *r1 = damon_new_region(4, 16);
+ struct damon_region *r2 = damon_new_region(24, 32);
+ struct damon_addr_range range = {.start = 8, .end = 28};
+ unsigned long expects[] = {8, 16, 16, 24, 24, 28};
+ int expect_idx = 0;
+ struct damon_region *r;
+
+ damon_add_region(r1, t);
+ damon_add_region(r2, t);
+ damon_set_regions(t, &range, 1);
+
+ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
+ damon_for_each_region(r, t) {
+ KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
+ KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
+ }
+ damon_destroy_target(t);
+}
+
static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_target),
KUNIT_CASE(damon_test_regions),
@@ -276,6 +298,7 @@ static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_merge_regions_of),
KUNIT_CASE(damon_test_split_regions_of),
KUNIT_CASE(damon_test_ops_registration),
+ KUNIT_CASE(damon_test_set_regions),
{},
};
diff --git a/mm/damon/core.c b/mm/damon/core.c
index 7d25dc582fe3..4de8c7c52979 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -29,6 +29,8 @@ static bool running_exclusive_ctxs;
static DEFINE_MUTEX(damon_ops_lock);
static struct damon_operations damon_registered_ops[NR_DAMON_OPS];
+static struct kmem_cache *damon_region_cache __ro_after_init;
+
/* Should be called under damon_ops_lock with id smaller than NR_DAMON_OPS */
static bool __damon_is_registered_ops(enum damon_ops_id id)
{
@@ -119,7 +121,7 @@ struct damon_region *damon_new_region(unsigned long start, unsigned long end)
{
struct damon_region *region;
- region = kmalloc(sizeof(*region), GFP_KERNEL);
+ region = kmem_cache_alloc(damon_region_cache, GFP_KERNEL);
if (!region)
return NULL;
@@ -148,7 +150,7 @@ static void damon_del_region(struct damon_region *r, struct damon_target *t)
static void damon_free_region(struct damon_region *r)
{
- kfree(r);
+ kmem_cache_free(damon_region_cache, r);
}
void damon_destroy_region(struct damon_region *r, struct damon_target *t)
@@ -169,6 +171,30 @@ static bool damon_intersect(struct damon_region *r,
}
/*
+ * Fill holes in regions with new regions.
+ */
+static int damon_fill_regions_holes(struct damon_region *first,
+ struct damon_region *last, struct damon_target *t)
+{
+ struct damon_region *r = first;
+
+ damon_for_each_region_from(r, t) {
+ struct damon_region *next, *newr;
+
+ if (r == last)
+ break;
+ next = damon_next_region(r);
+ if (r->ar.end != next->ar.start) {
+ newr = damon_new_region(r->ar.end, next->ar.start);
+ if (!newr)
+ return -ENOMEM;
+ damon_insert_region(newr, r, next, t);
+ }
+ }
+ return 0;
+}
+
+/*
* damon_set_regions() - Set regions of a target for given address ranges.
* @t: the given target.
* @ranges: array of new monitoring target ranges.
@@ -184,6 +210,7 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
{
struct damon_region *r, *next;
unsigned int i;
+ int err;
/* Remove regions which are not in the new ranges */
damon_for_each_region_safe(r, next, t) {
@@ -195,6 +222,7 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
damon_destroy_region(r, t);
}
+ r = damon_first_region(t);
/* Add new regions or resize existing regions to fit in the ranges */
for (i = 0; i < nr_ranges; i++) {
struct damon_region *first = NULL, *last, *newr;
@@ -202,7 +230,7 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
range = &ranges[i];
/* Get the first/last regions intersecting with the range */
- damon_for_each_region(r, t) {
+ damon_for_each_region_from(r, t) {
if (damon_intersect(r, range)) {
if (!first)
first = r;
@@ -225,52 +253,46 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
first->ar.start = ALIGN_DOWN(range->start,
DAMON_MIN_REGION);
last->ar.end = ALIGN(range->end, DAMON_MIN_REGION);
+
+ /* fill possible holes in the range */
+ err = damon_fill_regions_holes(first, last, t);
+ if (err)
+ return err;
}
}
return 0;
}
-struct damos *damon_new_scheme(
- unsigned long min_sz_region, unsigned long max_sz_region,
- unsigned int min_nr_accesses, unsigned int max_nr_accesses,
- unsigned int min_age_region, unsigned int max_age_region,
- enum damos_action action, struct damos_quota *quota,
- struct damos_watermarks *wmarks)
+/* initialize private fields of damos_quota and return the pointer */
+static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
+{
+ quota->total_charged_sz = 0;
+ quota->total_charged_ns = 0;
+ quota->esz = 0;
+ quota->charged_sz = 0;
+ quota->charged_from = 0;
+ quota->charge_target_from = NULL;
+ quota->charge_addr_from = 0;
+ return quota;
+}
+
+struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
+ enum damos_action action, struct damos_quota *quota,
+ struct damos_watermarks *wmarks)
{
struct damos *scheme;
scheme = kmalloc(sizeof(*scheme), GFP_KERNEL);
if (!scheme)
return NULL;
- scheme->min_sz_region = min_sz_region;
- scheme->max_sz_region = max_sz_region;
- scheme->min_nr_accesses = min_nr_accesses;
- scheme->max_nr_accesses = max_nr_accesses;
- scheme->min_age_region = min_age_region;
- scheme->max_age_region = max_age_region;
+ scheme->pattern = *pattern;
scheme->action = action;
scheme->stat = (struct damos_stat){};
INIT_LIST_HEAD(&scheme->list);
- scheme->quota.ms = quota->ms;
- scheme->quota.sz = quota->sz;
- scheme->quota.reset_interval = quota->reset_interval;
- scheme->quota.weight_sz = quota->weight_sz;
- scheme->quota.weight_nr_accesses = quota->weight_nr_accesses;
- scheme->quota.weight_age = quota->weight_age;
- scheme->quota.total_charged_sz = 0;
- scheme->quota.total_charged_ns = 0;
- scheme->quota.esz = 0;
- scheme->quota.charged_sz = 0;
- scheme->quota.charged_from = 0;
- scheme->quota.charge_target_from = NULL;
- scheme->quota.charge_addr_from = 0;
-
- scheme->wmarks.metric = wmarks->metric;
- scheme->wmarks.interval = wmarks->interval;
- scheme->wmarks.high = wmarks->high;
- scheme->wmarks.mid = wmarks->mid;
- scheme->wmarks.low = wmarks->low;
+ scheme->quota = *(damos_quota_init_priv(quota));
+
+ scheme->wmarks = *wmarks;
scheme->wmarks.activated = true;
return scheme;
@@ -360,17 +382,17 @@ struct damon_ctx *damon_new_ctx(void)
if (!ctx)
return NULL;
- ctx->sample_interval = 5 * 1000;
- ctx->aggr_interval = 100 * 1000;
- ctx->ops_update_interval = 60 * 1000 * 1000;
+ ctx->attrs.sample_interval = 5 * 1000;
+ ctx->attrs.aggr_interval = 100 * 1000;
+ ctx->attrs.ops_update_interval = 60 * 1000 * 1000;
ktime_get_coarse_ts64(&ctx->last_aggregation);
ctx->last_ops_update = ctx->last_aggregation;
mutex_init(&ctx->kdamond_lock);
- ctx->min_nr_regions = 10;
- ctx->max_nr_regions = 1000;
+ ctx->attrs.min_nr_regions = 10;
+ ctx->attrs.max_nr_regions = 1000;
INIT_LIST_HEAD(&ctx->adaptive_targets);
INIT_LIST_HEAD(&ctx->schemes);
@@ -406,32 +428,21 @@ void damon_destroy_ctx(struct damon_ctx *ctx)
/**
* damon_set_attrs() - Set attributes for the monitoring.
* @ctx: monitoring context
- * @sample_int: time interval between samplings
- * @aggr_int: time interval between aggregations
- * @ops_upd_int: time interval between monitoring operations updates
- * @min_nr_reg: minimal number of regions
- * @max_nr_reg: maximum number of regions
+ * @attrs: monitoring attributes
*
* This function should not be called while the kdamond is running.
* Every time interval is in micro-seconds.
*
* Return: 0 on success, negative error code otherwise.
*/
-int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
- unsigned long aggr_int, unsigned long ops_upd_int,
- unsigned long min_nr_reg, unsigned long max_nr_reg)
+int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
{
- if (min_nr_reg < 3)
+ if (attrs->min_nr_regions < 3)
return -EINVAL;
- if (min_nr_reg > max_nr_reg)
+ if (attrs->min_nr_regions > attrs->max_nr_regions)
return -EINVAL;
- ctx->sample_interval = sample_int;
- ctx->aggr_interval = aggr_int;
- ctx->ops_update_interval = ops_upd_int;
- ctx->min_nr_regions = min_nr_reg;
- ctx->max_nr_regions = max_nr_reg;
-
+ ctx->attrs = *attrs;
return 0;
}
@@ -443,10 +454,8 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
*
* This function should not be called while the kdamond of the context is
* running.
- *
- * Return: 0 if success, or negative error code otherwise.
*/
-int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
+void damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
ssize_t nr_schemes)
{
struct damos *s, *next;
@@ -456,7 +465,6 @@ int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes,
damon_destroy_scheme(s);
for (i = 0; i < nr_schemes; i++)
damon_add_scheme(ctx, schemes[i]);
- return 0;
}
/**
@@ -485,8 +493,8 @@ static unsigned long damon_region_sz_limit(struct damon_ctx *ctx)
sz += r->ar.end - r->ar.start;
}
- if (ctx->min_nr_regions)
- sz /= ctx->min_nr_regions;
+ if (ctx->attrs.min_nr_regions)
+ sz /= ctx->attrs.min_nr_regions;
if (sz < DAMON_MIN_REGION)
sz = DAMON_MIN_REGION;
@@ -635,7 +643,7 @@ static bool damon_check_reset_time_interval(struct timespec64 *baseline,
static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
{
return damon_check_reset_time_interval(&ctx->last_aggregation,
- ctx->aggr_interval);
+ ctx->attrs.aggr_interval);
}
/*
@@ -658,19 +666,20 @@ static void kdamond_reset_aggregated(struct damon_ctx *c)
}
}
-static void damon_split_region_at(struct damon_ctx *ctx,
- struct damon_target *t, struct damon_region *r,
- unsigned long sz_r);
+static void damon_split_region_at(struct damon_target *t,
+ struct damon_region *r, unsigned long sz_r);
static bool __damos_valid_target(struct damon_region *r, struct damos *s)
{
unsigned long sz;
sz = r->ar.end - r->ar.start;
- return s->min_sz_region <= sz && sz <= s->max_sz_region &&
- s->min_nr_accesses <= r->nr_accesses &&
- r->nr_accesses <= s->max_nr_accesses &&
- s->min_age_region <= r->age && r->age <= s->max_age_region;
+ return s->pattern.min_sz_region <= sz &&
+ sz <= s->pattern.max_sz_region &&
+ s->pattern.min_nr_accesses <= r->nr_accesses &&
+ r->nr_accesses <= s->pattern.max_nr_accesses &&
+ s->pattern.min_age_region <= r->age &&
+ r->age <= s->pattern.max_age_region;
}
static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
@@ -726,7 +735,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
continue;
sz = DAMON_MIN_REGION;
}
- damon_split_region_at(c, t, r, sz);
+ damon_split_region_at(t, r, sz);
r = damon_next_region(r);
sz = r->ar.end - r->ar.start;
}
@@ -745,7 +754,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
DAMON_MIN_REGION);
if (!sz)
goto update_stat;
- damon_split_region_at(c, t, r, sz);
+ damon_split_region_at(t, r, sz);
}
ktime_get_coarse_ts64(&begin);
sz_applied = c->ops.apply_scheme(c, t, r, s);
@@ -928,9 +937,8 @@ static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
* r the region to be split
* sz_r size of the first sub-region that will be made
*/
-static void damon_split_region_at(struct damon_ctx *ctx,
- struct damon_target *t, struct damon_region *r,
- unsigned long sz_r)
+static void damon_split_region_at(struct damon_target *t,
+ struct damon_region *r, unsigned long sz_r)
{
struct damon_region *new;
@@ -947,8 +955,7 @@ static void damon_split_region_at(struct damon_ctx *ctx,
}
/* Split every region in the given target into 'nr_subs' regions */
-static void damon_split_regions_of(struct damon_ctx *ctx,
- struct damon_target *t, int nr_subs)
+static void damon_split_regions_of(struct damon_target *t, int nr_subs)
{
struct damon_region *r, *next;
unsigned long sz_region, sz_sub = 0;
@@ -969,7 +976,7 @@ static void damon_split_regions_of(struct damon_ctx *ctx,
if (sz_sub == 0 || sz_sub >= sz_region)
continue;
- damon_split_region_at(ctx, t, r, sz_sub);
+ damon_split_region_at(t, r, sz_sub);
sz_region = sz_sub;
}
}
@@ -995,16 +1002,16 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
damon_for_each_target(t, ctx)
nr_regions += damon_nr_regions(t);
- if (nr_regions > ctx->max_nr_regions / 2)
+ if (nr_regions > ctx->attrs.max_nr_regions / 2)
return;
/* Maybe the middle of the region has different access frequency */
if (last_nr_regions == nr_regions &&
- nr_regions < ctx->max_nr_regions / 3)
+ nr_regions < ctx->attrs.max_nr_regions / 3)
nr_subregions = 3;
damon_for_each_target(t, ctx)
- damon_split_regions_of(ctx, t, nr_subregions);
+ damon_split_regions_of(t, nr_subregions);
last_nr_regions = nr_regions;
}
@@ -1018,7 +1025,7 @@ static void kdamond_split_regions(struct damon_ctx *ctx)
static bool kdamond_need_update_operations(struct damon_ctx *ctx)
{
return damon_check_reset_time_interval(&ctx->last_ops_update,
- ctx->ops_update_interval);
+ ctx->attrs.ops_update_interval);
}
/*
@@ -1142,32 +1149,27 @@ static int kdamond_fn(void *data)
struct damon_region *r, *next;
unsigned int max_nr_accesses = 0;
unsigned long sz_limit = 0;
- bool done = false;
pr_debug("kdamond (%d) starts\n", current->pid);
if (ctx->ops.init)
ctx->ops.init(ctx);
if (ctx->callback.before_start && ctx->callback.before_start(ctx))
- done = true;
+ goto done;
sz_limit = damon_region_sz_limit(ctx);
- while (!kdamond_need_stop(ctx) && !done) {
- if (kdamond_wait_activation(ctx)) {
- done = true;
- continue;
- }
+ while (!kdamond_need_stop(ctx)) {
+ if (kdamond_wait_activation(ctx))
+ break;
if (ctx->ops.prepare_access_checks)
ctx->ops.prepare_access_checks(ctx);
if (ctx->callback.after_sampling &&
- ctx->callback.after_sampling(ctx)) {
- done = true;
- continue;
- }
+ ctx->callback.after_sampling(ctx))
+ break;
- kdamond_usleep(ctx->sample_interval);
+ kdamond_usleep(ctx->attrs.sample_interval);
if (ctx->ops.check_accesses)
max_nr_accesses = ctx->ops.check_accesses(ctx);
@@ -1177,10 +1179,8 @@ static int kdamond_fn(void *data)
max_nr_accesses / 10,
sz_limit);
if (ctx->callback.after_aggregation &&
- ctx->callback.after_aggregation(ctx)) {
- done = true;
- continue;
- }
+ ctx->callback.after_aggregation(ctx))
+ break;
kdamond_apply_schemes(ctx);
kdamond_reset_aggregated(ctx);
kdamond_split_regions(ctx);
@@ -1194,6 +1194,7 @@ static int kdamond_fn(void *data)
sz_limit = damon_region_sz_limit(ctx);
}
}
+done:
damon_for_each_target(t, ctx) {
damon_for_each_region_safe(r, next, t)
damon_destroy_region(r, t);
@@ -1218,4 +1219,90 @@ static int kdamond_fn(void *data)
return 0;
}
+/*
+ * struct damon_system_ram_region - System RAM resource address region of
+ * [@start, @end).
+ * @start: Start address of the region (inclusive).
+ * @end: End address of the region (exclusive).
+ */
+struct damon_system_ram_region {
+ unsigned long start;
+ unsigned long end;
+};
+
+static int walk_system_ram(struct resource *res, void *arg)
+{
+ struct damon_system_ram_region *a = arg;
+
+ if (a->end - a->start < resource_size(res)) {
+ a->start = res->start;
+ a->end = res->end;
+ }
+ return 0;
+}
+
+/*
+ * Find biggest 'System RAM' resource and store its start and end address in
+ * @start and @end, respectively. If no System RAM is found, returns false.
+ */
+static bool damon_find_biggest_system_ram(unsigned long *start,
+ unsigned long *end)
+
+{
+ struct damon_system_ram_region arg = {};
+
+ walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
+ if (arg.end <= arg.start)
+ return false;
+
+ *start = arg.start;
+ *end = arg.end;
+ return true;
+}
+
+/**
+ * damon_set_region_biggest_system_ram_default() - Set the region of the given
+ * monitoring target as requested, or biggest 'System RAM'.
+ * @t: The monitoring target to set the region.
+ * @start: The pointer to the start address of the region.
+ * @end: The pointer to the end address of the region.
+ *
+ * This function sets the region of @t as requested by @start and @end. If the
+ * values of @start and @end are zero, however, this function finds the biggest
+ * 'System RAM' resource and sets the region to cover the resource. In the
+ * latter case, this function saves the start and end addresses of the resource
+ * in @start and @end, respectively.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int damon_set_region_biggest_system_ram_default(struct damon_target *t,
+ unsigned long *start, unsigned long *end)
+{
+ struct damon_addr_range addr_range;
+
+ if (*start > *end)
+ return -EINVAL;
+
+ if (!*start && !*end &&
+ !damon_find_biggest_system_ram(start, end))
+ return -EINVAL;
+
+ addr_range.start = *start;
+ addr_range.end = *end;
+ return damon_set_regions(t, &addr_range, 1);
+}
+
+static int __init damon_init(void)
+{
+ damon_region_cache = KMEM_CACHE(damon_region, 0);
+ if (unlikely(!damon_region_cache)) {
+ pr_err("creating damon_region_cache fails\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+subsys_initcall(damon_init);
+
#include "core-test.h"
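The damon_set_region_biggest_system_ram_default() helper added above
centralizes the 'System RAM' auto-detection that the lru_sort module used to
open-code (see the lru_sort.c hunks below). A hedged sketch of the calling
convention, with hypothetical module-scope parameters:

	static unsigned long monitor_region_start;	/* 0 means auto-detect */
	static unsigned long monitor_region_end;	/* 0 means auto-detect */

	static int my_apply_regions(struct damon_target *target)
	{
		/* with both zero, the biggest System RAM range is picked and reported back */
		return damon_set_region_biggest_system_ram_default(target,
				&monitor_region_start, &monitor_region_end);
	}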
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index 4e51466c4e74..6f0ae7d3ae39 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -55,9 +55,9 @@ static ssize_t dbgfs_attrs_read(struct file *file,
mutex_lock(&ctx->kdamond_lock);
ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n",
- ctx->sample_interval, ctx->aggr_interval,
- ctx->ops_update_interval, ctx->min_nr_regions,
- ctx->max_nr_regions);
+ ctx->attrs.sample_interval, ctx->attrs.aggr_interval,
+ ctx->attrs.ops_update_interval,
+ ctx->attrs.min_nr_regions, ctx->attrs.max_nr_regions);
mutex_unlock(&ctx->kdamond_lock);
return simple_read_from_buffer(buf, count, ppos, kbuf, ret);
@@ -67,7 +67,7 @@ static ssize_t dbgfs_attrs_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
struct damon_ctx *ctx = file->private_data;
- unsigned long s, a, r, minr, maxr;
+ struct damon_attrs attrs;
char *kbuf;
ssize_t ret;
@@ -76,7 +76,10 @@ static ssize_t dbgfs_attrs_write(struct file *file,
return PTR_ERR(kbuf);
if (sscanf(kbuf, "%lu %lu %lu %lu %lu",
- &s, &a, &r, &minr, &maxr) != 5) {
+ &attrs.sample_interval, &attrs.aggr_interval,
+ &attrs.ops_update_interval,
+ &attrs.min_nr_regions,
+ &attrs.max_nr_regions) != 5) {
ret = -EINVAL;
goto out;
}
@@ -87,7 +90,7 @@ static ssize_t dbgfs_attrs_write(struct file *file,
goto unlock_out;
}
- ret = damon_set_attrs(ctx, s, a, r, minr, maxr);
+ ret = damon_set_attrs(ctx, &attrs);
if (!ret)
ret = count;
unlock_out:
@@ -131,9 +134,12 @@ static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len)
damon_for_each_scheme(s, c) {
rc = scnprintf(&buf[written], len - written,
"%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
- s->min_sz_region, s->max_sz_region,
- s->min_nr_accesses, s->max_nr_accesses,
- s->min_age_region, s->max_age_region,
+ s->pattern.min_sz_region,
+ s->pattern.max_sz_region,
+ s->pattern.min_nr_accesses,
+ s->pattern.max_nr_accesses,
+ s->pattern.min_age_region,
+ s->pattern.max_age_region,
damos_action_to_dbgfs_scheme_action(s->action),
s->quota.ms, s->quota.sz,
s->quota.reset_interval,
@@ -221,8 +227,6 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
struct damos *scheme, **schemes;
const int max_nr_schemes = 256;
int pos = 0, parsed, ret;
- unsigned long min_sz, max_sz;
- unsigned int min_nr_a, max_nr_a, min_age, max_age;
unsigned int action_input;
enum damos_action action;
@@ -233,13 +237,18 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
*nr_schemes = 0;
while (pos < len && *nr_schemes < max_nr_schemes) {
+ struct damos_access_pattern pattern = {};
struct damos_quota quota = {};
struct damos_watermarks wmarks;
ret = sscanf(&str[pos],
"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n",
- &min_sz, &max_sz, &min_nr_a, &max_nr_a,
- &min_age, &max_age, &action_input, &quota.ms,
+ &pattern.min_sz_region, &pattern.max_sz_region,
+ &pattern.min_nr_accesses,
+ &pattern.max_nr_accesses,
+ &pattern.min_age_region,
+ &pattern.max_age_region,
+ &action_input, &quota.ms,
&quota.sz, &quota.reset_interval,
&quota.weight_sz, &quota.weight_nr_accesses,
&quota.weight_age, &wmarks.metric,
@@ -251,7 +260,9 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
if ((int)action < 0)
goto fail;
- if (min_sz > max_sz || min_nr_a > max_nr_a || min_age > max_age)
+ if (pattern.min_sz_region > pattern.max_sz_region ||
+ pattern.min_nr_accesses > pattern.max_nr_accesses ||
+ pattern.min_age_region > pattern.max_age_region)
goto fail;
if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low ||
@@ -259,8 +270,7 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
goto fail;
pos += parsed;
- scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
- min_age, max_age, action, &quota, &wmarks);
+ scheme = damon_new_scheme(&pattern, action, &quota, &wmarks);
if (!scheme)
goto fail;
@@ -297,11 +307,9 @@ static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf,
goto unlock_out;
}
- ret = damon_set_schemes(ctx, schemes, nr_schemes);
- if (!ret) {
- ret = count;
- nr_schemes = 0;
- }
+ damon_set_schemes(ctx, schemes, nr_schemes);
+ ret = count;
+ nr_schemes = 0;
unlock_out:
mutex_unlock(&ctx->kdamond_lock);
@@ -1053,7 +1061,7 @@ static int __init __damon_dbgfs_init(void)
fops[i]);
dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]);
- dbgfs_dirs = kmalloc_array(1, sizeof(dbgfs_root), GFP_KERNEL);
+ dbgfs_dirs = kmalloc(sizeof(dbgfs_root), GFP_KERNEL);
if (!dbgfs_dirs) {
debugfs_remove(dbgfs_root);
return -ENOMEM;
diff --git a/mm/damon/lru_sort.c b/mm/damon/lru_sort.c
index 9de6f00a71c5..efbc2bda8b9c 100644
--- a/mm/damon/lru_sort.c
+++ b/mm/damon/lru_sort.c
@@ -13,6 +13,8 @@
#include <linux/sched.h>
#include <linux/workqueue.h>
+#include "modules-common.h"
+
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
@@ -63,109 +65,35 @@ module_param(hot_thres_access_freq, ulong, 0600);
static unsigned long cold_min_age __read_mostly = 120000000;
module_param(cold_min_age, ulong, 0600);
-/*
- * Limit of time for trying the LRU lists sorting in milliseconds.
- *
- * DAMON_LRU_SORT tries to use only up to this time within a time window
- * (quota_reset_interval_ms) for trying LRU lists sorting. This can be used
- * for limiting CPU consumption of DAMON_LRU_SORT. If the value is zero, the
- * limit is disabled.
- *
- * 10 ms by default.
- */
-static unsigned long quota_ms __read_mostly = 10;
-module_param(quota_ms, ulong, 0600);
-
-/*
- * The time quota charge reset interval in milliseconds.
- *
- * The charge reset interval for the quota of time (quota_ms). That is,
- * DAMON_LRU_SORT does not try LRU-lists sorting for more than quota_ms
- * milliseconds or quota_sz bytes within quota_reset_interval_ms milliseconds.
- *
- * 1 second by default.
- */
-static unsigned long quota_reset_interval_ms __read_mostly = 1000;
-module_param(quota_reset_interval_ms, ulong, 0600);
-
-/*
- * The watermarks check time interval in microseconds.
- *
- * Minimal time to wait before checking the watermarks, when DAMON_LRU_SORT is
- * enabled but inactive due to its watermarks rule. 5 seconds by default.
- */
-static unsigned long wmarks_interval __read_mostly = 5000000;
-module_param(wmarks_interval, ulong, 0600);
-
-/*
- * Free memory rate (per thousand) for the high watermark.
- *
- * If free memory of the system in bytes per thousand bytes is higher than
- * this, DAMON_LRU_SORT becomes inactive, so it does nothing but periodically
- * checks the watermarks. 200 (20%) by default.
- */
-static unsigned long wmarks_high __read_mostly = 200;
-module_param(wmarks_high, ulong, 0600);
-
-/*
- * Free memory rate (per thousand) for the middle watermark.
- *
- * If free memory of the system in bytes per thousand bytes is between this and
- * the low watermark, DAMON_LRU_SORT becomes active, so starts the monitoring
- * and the LRU-lists sorting. 150 (15%) by default.
- */
-static unsigned long wmarks_mid __read_mostly = 150;
-module_param(wmarks_mid, ulong, 0600);
-
-/*
- * Free memory rate (per thousand) for the low watermark.
- *
- * If free memory of the system in bytes per thousand bytes is lower than this,
- * DAMON_LRU_SORT becomes inactive, so it does nothing but periodically checks
- * the watermarks. 50 (5%) by default.
- */
-static unsigned long wmarks_low __read_mostly = 50;
-module_param(wmarks_low, ulong, 0600);
-
-/*
- * Sampling interval for the monitoring in microseconds.
- *
- * The sampling interval of DAMON for the hot/cold memory monitoring. Please
- * refer to the DAMON documentation for more detail. 5 ms by default.
- */
-static unsigned long sample_interval __read_mostly = 5000;
-module_param(sample_interval, ulong, 0600);
-
-/*
- * Aggregation interval for the monitoring in microseconds.
- *
- * The aggregation interval of DAMON for the hot/cold memory monitoring.
- * Please refer to the DAMON documentation for more detail. 100 ms by default.
- */
-static unsigned long aggr_interval __read_mostly = 100000;
-module_param(aggr_interval, ulong, 0600);
-
-/*
- * Minimum number of monitoring regions.
- *
- * The minimal number of monitoring regions of DAMON for the hot/cold memory
- * monitoring. This can be used to set lower-bound of the monitoring quality.
- * But, setting this too high could result in increased monitoring overhead.
- * Please refer to the DAMON documentation for more detail. 10 by default.
- */
-static unsigned long min_nr_regions __read_mostly = 10;
-module_param(min_nr_regions, ulong, 0600);
-
-/*
- * Maximum number of monitoring regions.
- *
- * The maximum number of monitoring regions of DAMON for the hot/cold memory
- * monitoring. This can be used to set upper-bound of the monitoring overhead.
- * However, setting this too low could result in bad monitoring quality.
- * Please refer to the DAMON documentation for more detail. 1000 by default.
- */
-static unsigned long max_nr_regions __read_mostly = 1000;
-module_param(max_nr_regions, ulong, 0600);
+static struct damos_quota damon_lru_sort_quota = {
+ /* Use up to 10 ms per 1 sec, by default */
+ .ms = 10,
+ .sz = 0,
+ .reset_interval = 1000,
+ /* Within the quota, mark hotter regions accessed first. */
+ .weight_sz = 0,
+ .weight_nr_accesses = 1,
+ .weight_age = 0,
+};
+DEFINE_DAMON_MODULES_DAMOS_TIME_QUOTA(damon_lru_sort_quota);
+
+static struct damos_watermarks damon_lru_sort_wmarks = {
+ .metric = DAMOS_WMARK_FREE_MEM_RATE,
+ .interval = 5000000, /* 5 seconds */
+ .high = 200, /* 20 percent */
+ .mid = 150, /* 15 percent */
+ .low = 50, /* 5 percent */
+};
+DEFINE_DAMON_MODULES_WMARKS_PARAMS(damon_lru_sort_wmarks);
+
+static struct damon_attrs damon_lru_sort_mon_attrs = {
+ .sample_interval = 5000, /* 5 ms */
+ .aggr_interval = 100000, /* 100 ms */
+ .ops_update_interval = 0,
+ .min_nr_regions = 10,
+ .max_nr_regions = 1000,
+};
+DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(damon_lru_sort_mon_attrs);
/*
* Start of the target memory region in physical address.
@@ -194,222 +122,97 @@ module_param(monitor_region_end, ulong, 0600);
static int kdamond_pid __read_mostly = -1;
module_param(kdamond_pid, int, 0400);
-/*
- * Number of hot memory regions that tried to be LRU-sorted.
- */
-static unsigned long nr_lru_sort_tried_hot_regions __read_mostly;
-module_param(nr_lru_sort_tried_hot_regions, ulong, 0400);
-
-/*
- * Total bytes of hot memory regions that tried to be LRU-sorted.
- */
-static unsigned long bytes_lru_sort_tried_hot_regions __read_mostly;
-module_param(bytes_lru_sort_tried_hot_regions, ulong, 0400);
-
-/*
- * Number of hot memory regions that successfully be LRU-sorted.
- */
-static unsigned long nr_lru_sorted_hot_regions __read_mostly;
-module_param(nr_lru_sorted_hot_regions, ulong, 0400);
-
-/*
- * Total bytes of hot memory regions that successfully be LRU-sorted.
- */
-static unsigned long bytes_lru_sorted_hot_regions __read_mostly;
-module_param(bytes_lru_sorted_hot_regions, ulong, 0400);
-
-/*
- * Number of times that the time quota limit for hot regions have exceeded
- */
-static unsigned long nr_hot_quota_exceeds __read_mostly;
-module_param(nr_hot_quota_exceeds, ulong, 0400);
-
-/*
- * Number of cold memory regions that tried to be LRU-sorted.
- */
-static unsigned long nr_lru_sort_tried_cold_regions __read_mostly;
-module_param(nr_lru_sort_tried_cold_regions, ulong, 0400);
-
-/*
- * Total bytes of cold memory regions that tried to be LRU-sorted.
- */
-static unsigned long bytes_lru_sort_tried_cold_regions __read_mostly;
-module_param(bytes_lru_sort_tried_cold_regions, ulong, 0400);
-
-/*
- * Number of cold memory regions that successfully be LRU-sorted.
- */
-static unsigned long nr_lru_sorted_cold_regions __read_mostly;
-module_param(nr_lru_sorted_cold_regions, ulong, 0400);
-
-/*
- * Total bytes of cold memory regions that successfully be LRU-sorted.
- */
-static unsigned long bytes_lru_sorted_cold_regions __read_mostly;
-module_param(bytes_lru_sorted_cold_regions, ulong, 0400);
-
-/*
- * Number of times that the time quota limit for cold regions have exceeded
- */
-static unsigned long nr_cold_quota_exceeds __read_mostly;
-module_param(nr_cold_quota_exceeds, ulong, 0400);
+static struct damos_stat damon_lru_sort_hot_stat;
+DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_hot_stat,
+ lru_sort_tried_hot_regions, lru_sorted_hot_regions,
+ hot_quota_exceeds);
+
+static struct damos_stat damon_lru_sort_cold_stat;
+DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_cold_stat,
+ lru_sort_tried_cold_regions, lru_sorted_cold_regions,
+ cold_quota_exceeds);
+
+static struct damos_access_pattern damon_lru_sort_stub_pattern = {
+ /* Find regions having PAGE_SIZE or larger size */
+ .min_sz_region = PAGE_SIZE,
+ .max_sz_region = ULONG_MAX,
+ /* no matter its access frequency */
+ .min_nr_accesses = 0,
+ .max_nr_accesses = UINT_MAX,
+ /* no matter its age */
+ .min_age_region = 0,
+ .max_age_region = UINT_MAX,
+};
static struct damon_ctx *ctx;
static struct damon_target *target;
-struct damon_lru_sort_ram_walk_arg {
- unsigned long start;
- unsigned long end;
-};
-
-static int walk_system_ram(struct resource *res, void *arg)
-{
- struct damon_lru_sort_ram_walk_arg *a = arg;
-
- if (a->end - a->start < resource_size(res)) {
- a->start = res->start;
- a->end = res->end;
- }
- return 0;
-}
-
-/*
- * Find biggest 'System RAM' resource and store its start and end address in
- * @start and @end, respectively. If no System RAM is found, returns false.
- */
-static bool get_monitoring_region(unsigned long *start, unsigned long *end)
+static struct damos *damon_lru_sort_new_scheme(
+ struct damos_access_pattern *pattern, enum damos_action action)
{
- struct damon_lru_sort_ram_walk_arg arg = {};
+ struct damos_quota quota = damon_lru_sort_quota;
- walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
- if (arg.end <= arg.start)
- return false;
+ /* Use half of total quota for hot/cold pages sorting */
+ quota.ms = quota.ms / 2;
- *start = arg.start;
- *end = arg.end;
- return true;
+ return damon_new_scheme(
+ /* find the pattern, and */
+ pattern,
+ /* (de)prioritize on LRU-lists */
+ action,
+ /* under the quota. */
+ &quota,
+ /* (De)activate this according to the watermarks. */
+ &damon_lru_sort_wmarks);
}
/* Create a DAMON-based operation scheme for hot memory regions */
static struct damos *damon_lru_sort_new_hot_scheme(unsigned int hot_thres)
{
- struct damos_watermarks wmarks = {
- .metric = DAMOS_WMARK_FREE_MEM_RATE,
- .interval = wmarks_interval,
- .high = wmarks_high,
- .mid = wmarks_mid,
- .low = wmarks_low,
- };
- struct damos_quota quota = {
- /*
- * Do not try LRU-lists sorting of hot pages for more than half
- * of quota_ms milliseconds within quota_reset_interval_ms.
- */
- .ms = quota_ms / 2,
- .sz = 0,
- .reset_interval = quota_reset_interval_ms,
- /* Within the quota, mark hotter regions accessed first. */
- .weight_sz = 0,
- .weight_nr_accesses = 1,
- .weight_age = 0,
- };
- struct damos *scheme = damon_new_scheme(
- /* Find regions having PAGE_SIZE or larger size */
- PAGE_SIZE, ULONG_MAX,
- /* and accessed for more than the threshold */
- hot_thres, UINT_MAX,
- /* no matter its age */
- 0, UINT_MAX,
- /* prioritize those on LRU lists, as soon as found */
- DAMOS_LRU_PRIO,
- /* under the quota. */
- &quota,
- /* (De)activate this according to the watermarks. */
- &wmarks);
+ struct damos_access_pattern pattern = damon_lru_sort_stub_pattern;
- return scheme;
+ pattern.min_nr_accesses = hot_thres;
+ return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_PRIO);
}
/* Create a DAMON-based operation scheme for cold memory regions */
static struct damos *damon_lru_sort_new_cold_scheme(unsigned int cold_thres)
{
- struct damos_watermarks wmarks = {
- .metric = DAMOS_WMARK_FREE_MEM_RATE,
- .interval = wmarks_interval,
- .high = wmarks_high,
- .mid = wmarks_mid,
- .low = wmarks_low,
- };
- struct damos_quota quota = {
- /*
- * Do not try LRU-lists sorting of cold pages for more than
- * half of quota_ms milliseconds within
- * quota_reset_interval_ms.
- */
- .ms = quota_ms / 2,
- .sz = 0,
- .reset_interval = quota_reset_interval_ms,
- /* Within the quota, mark colder regions not accessed first. */
- .weight_sz = 0,
- .weight_nr_accesses = 0,
- .weight_age = 1,
- };
- struct damos *scheme = damon_new_scheme(
- /* Find regions having PAGE_SIZE or larger size */
- PAGE_SIZE, ULONG_MAX,
- /* and not accessed at all */
- 0, 0,
- /* for cold_thres or more micro-seconds, and */
- cold_thres, UINT_MAX,
- /* mark those as not accessed, as soon as found */
- DAMOS_LRU_DEPRIO,
- /* under the quota. */
- &quota,
- /* (De)activate this according to the watermarks. */
- &wmarks);
+ struct damos_access_pattern pattern = damon_lru_sort_stub_pattern;
- return scheme;
+ pattern.max_nr_accesses = 0;
+ pattern.min_age_region = cold_thres;
+ return damon_lru_sort_new_scheme(&pattern, DAMOS_LRU_DEPRIO);
}
static int damon_lru_sort_apply_parameters(void)
{
- struct damos *scheme, *next_scheme;
- struct damon_addr_range addr_range;
+ struct damos *scheme;
unsigned int hot_thres, cold_thres;
int err = 0;
- err = damon_set_attrs(ctx, sample_interval, aggr_interval, 0,
- min_nr_regions, max_nr_regions);
+ err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
if (err)
return err;
- /* free previously set schemes */
- damon_for_each_scheme_safe(scheme, next_scheme, ctx)
- damon_destroy_scheme(scheme);
-
/* aggr_interval / sample_interval is the maximum nr_accesses */
- hot_thres = aggr_interval / sample_interval * hot_thres_access_freq /
- 1000;
+ hot_thres = damon_lru_sort_mon_attrs.aggr_interval /
+ damon_lru_sort_mon_attrs.sample_interval *
+ hot_thres_access_freq / 1000;
scheme = damon_lru_sort_new_hot_scheme(hot_thres);
if (!scheme)
return -ENOMEM;
- damon_add_scheme(ctx, scheme);
+ damon_set_schemes(ctx, &scheme, 1);
- cold_thres = cold_min_age / aggr_interval;
+ cold_thres = cold_min_age / damon_lru_sort_mon_attrs.aggr_interval;
scheme = damon_lru_sort_new_cold_scheme(cold_thres);
if (!scheme)
return -ENOMEM;
damon_add_scheme(ctx, scheme);
- if (monitor_region_start > monitor_region_end)
- return -EINVAL;
- if (!monitor_region_start && !monitor_region_end &&
- !get_monitoring_region(&monitor_region_start,
- &monitor_region_end))
- return -EINVAL;
- addr_range.start = monitor_region_start;
- addr_range.end = monitor_region_end;
- return damon_set_regions(target, &addr_range, 1);
+ return damon_set_region_biggest_system_ram_default(target,
+ &monitor_region_start,
+ &monitor_region_end);
}
static int damon_lru_sort_turn(bool on)
@@ -495,19 +298,10 @@ static int damon_lru_sort_after_aggregation(struct damon_ctx *c)
/* update the stats parameter */
damon_for_each_scheme(s, c) {
- if (s->action == DAMOS_LRU_PRIO) {
- nr_lru_sort_tried_hot_regions = s->stat.nr_tried;
- bytes_lru_sort_tried_hot_regions = s->stat.sz_tried;
- nr_lru_sorted_hot_regions = s->stat.nr_applied;
- bytes_lru_sorted_hot_regions = s->stat.sz_applied;
- nr_hot_quota_exceeds = s->stat.qt_exceeds;
- } else if (s->action == DAMOS_LRU_DEPRIO) {
- nr_lru_sort_tried_cold_regions = s->stat.nr_tried;
- bytes_lru_sort_tried_cold_regions = s->stat.sz_tried;
- nr_lru_sorted_cold_regions = s->stat.nr_applied;
- bytes_lru_sorted_cold_regions = s->stat.sz_applied;
- nr_cold_quota_exceeds = s->stat.qt_exceeds;
- }
+ if (s->action == DAMOS_LRU_PRIO)
+ damon_lru_sort_hot_stat = s->stat;
+ else if (s->action == DAMOS_LRU_DEPRIO)
+ damon_lru_sort_cold_stat = s->stat;
}
return damon_lru_sort_handle_commit_inputs();
diff --git a/mm/damon/modules-common.h b/mm/damon/modules-common.h
new file mode 100644
index 000000000000..5a4921851d32
--- /dev/null
+++ b/mm/damon/modules-common.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common Primitives for DAMON Modules
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#include <linux/moduleparam.h>
+
+#define DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(attrs) \
+ module_param_named(sample_interval, attrs.sample_interval, \
+ ulong, 0600); \
+ module_param_named(aggr_interval, attrs.aggr_interval, ulong, \
+ 0600); \
+ module_param_named(min_nr_regions, attrs.min_nr_regions, ulong, \
+ 0600); \
+ module_param_named(max_nr_regions, attrs.max_nr_regions, ulong, \
+ 0600);
+
+#define DEFINE_DAMON_MODULES_DAMOS_TIME_QUOTA(quota) \
+ module_param_named(quota_ms, quota.ms, ulong, 0600); \
+ module_param_named(quota_reset_interval_ms, \
+ quota.reset_interval, ulong, 0600);
+
+#define DEFINE_DAMON_MODULES_DAMOS_QUOTAS(quota) \
+ DEFINE_DAMON_MODULES_DAMOS_TIME_QUOTA(quota) \
+ module_param_named(quota_sz, quota.sz, ulong, 0600);
+
+#define DEFINE_DAMON_MODULES_WMARKS_PARAMS(wmarks) \
+ module_param_named(wmarks_interval, wmarks.interval, ulong, \
+ 0600); \
+ module_param_named(wmarks_high, wmarks.high, ulong, 0600); \
+ module_param_named(wmarks_mid, wmarks.mid, ulong, 0600); \
+ module_param_named(wmarks_low, wmarks.low, ulong, 0600);
+
+#define DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(stat, try_name, \
+ succ_name, qt_exceed_name) \
+ module_param_named(nr_##try_name, stat.nr_tried, ulong, 0400); \
+ module_param_named(bytes_##try_name, stat.sz_tried, ulong, \
+ 0400); \
+ module_param_named(nr_##succ_name, stat.nr_applied, ulong, \
+ 0400); \
+ module_param_named(bytes_##succ_name, stat.sz_applied, ulong, \
+ 0400); \
+ module_param_named(nr_##qt_exceed_name, stat.qt_exceeds, ulong, \
+ 0400);
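For reference, each of the macros above expands to plain module_param_named() declarations bound to the fields of the struct it is given. A minimal sketch of the intended usage (the struct and variable names here are illustrative only; the pattern mirrors the mm/damon/reclaim.c hunk later in this patch):

#include <linux/damon.h>
#include "modules-common.h"

static struct damos_quota example_quota = {
	.ms = 10,			/* up to 10 ms of time ... */
	.sz = 128 * 1024 * 1024,	/* ... or 128 MiB of memory ... */
	.reset_interval = 1000,		/* ... per 1000 ms window */
};
/*
 * Exposes quota_ms, quota_sz and quota_reset_interval_ms as 0600 module
 * parameters backed by the struct fields above.
 */
DEFINE_DAMON_MODULES_DAMOS_QUOTAS(example_quota);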
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index b1335de200e7..75409601f934 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -88,7 +88,7 @@ void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr)
#define DAMON_MAX_SUBSCORE (100)
#define DAMON_MAX_AGE_IN_LOG (32)
-int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
+int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
struct damos *s)
{
unsigned int max_nr_accesses;
@@ -99,10 +99,10 @@ int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
unsigned int age_weight = s->quota.weight_age;
int hotness;
- max_nr_accesses = c->aggr_interval / c->sample_interval;
+ max_nr_accesses = c->attrs.aggr_interval / c->attrs.sample_interval;
freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
- age_in_sec = (unsigned long)r->age * c->aggr_interval / 1000000;
+ age_in_sec = (unsigned long)r->age * c->attrs.aggr_interval / 1000000;
for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
age_in_log++, age_in_sec >>= 1)
;
@@ -127,48 +127,14 @@ int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
*/
hotness = hotness * DAMOS_MAX_SCORE / DAMON_MAX_SUBSCORE;
- /* Return coldness of the region */
- return DAMOS_MAX_SCORE - hotness;
+ return hotness;
}
-int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
+int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
struct damos *s)
{
- unsigned int max_nr_accesses;
- int freq_subscore;
- unsigned int age_in_sec;
- int age_in_log, age_subscore;
- unsigned int freq_weight = s->quota.weight_nr_accesses;
- unsigned int age_weight = s->quota.weight_age;
- int hotness;
-
- max_nr_accesses = c->aggr_interval / c->sample_interval;
- freq_subscore = r->nr_accesses * DAMON_MAX_SUBSCORE / max_nr_accesses;
-
- age_in_sec = (unsigned long)r->age * c->aggr_interval / 1000000;
- for (age_in_log = 0; age_in_log < DAMON_MAX_AGE_IN_LOG && age_in_sec;
- age_in_log++, age_in_sec >>= 1)
- ;
+ int hotness = damon_hot_score(c, r, s);
- /* If frequency is 0, higher age means it's colder */
- if (freq_subscore == 0)
- age_in_log *= -1;
-
- /*
- * Now age_in_log is in [-DAMON_MAX_AGE_IN_LOG, DAMON_MAX_AGE_IN_LOG].
- * Scale it to be in [0, 100] and set it as age subscore.
- */
- age_in_log += DAMON_MAX_AGE_IN_LOG;
- age_subscore = age_in_log * DAMON_MAX_SUBSCORE /
- DAMON_MAX_AGE_IN_LOG / 2;
-
- hotness = (freq_weight * freq_subscore + age_weight * age_subscore);
- if (freq_weight + age_weight)
- hotness /= freq_weight + age_weight;
- /*
- * Transform it to fit in [0, DAMOS_MAX_SCORE]
- */
- hotness = hotness * DAMOS_MAX_SCORE / DAMON_MAX_SUBSCORE;
-
- return hotness;
+ /* Return coldness of the region */
+ return DAMOS_MAX_SCORE - hotness;
}
diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
index 52329ff361cd..8d82d3722204 100644
--- a/mm/damon/ops-common.h
+++ b/mm/damon/ops-common.h
@@ -12,7 +12,7 @@ struct page *damon_get_page(unsigned long pfn);
void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr);
void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr);
-int damon_pageout_score(struct damon_ctx *c, struct damon_region *r,
+int damon_cold_score(struct damon_ctx *c, struct damon_region *r,
struct damos *s);
int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
struct damos *s);
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index dc131c6a5403..e1a4315c4be6 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -63,8 +63,7 @@ out:
folio_put(folio);
}
-static void __damon_pa_prepare_access_check(struct damon_ctx *ctx,
- struct damon_region *r)
+static void __damon_pa_prepare_access_check(struct damon_region *r)
{
r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
@@ -78,7 +77,7 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t)
- __damon_pa_prepare_access_check(ctx, r);
+ __damon_pa_prepare_access_check(r);
}
}
@@ -166,8 +165,7 @@ out:
return result.accessed;
}
-static void __damon_pa_check_access(struct damon_ctx *ctx,
- struct damon_region *r)
+static void __damon_pa_check_access(struct damon_region *r)
{
static unsigned long last_addr;
static unsigned long last_page_sz = PAGE_SIZE;
@@ -196,7 +194,7 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
damon_for_each_target(t, ctx) {
damon_for_each_region(r, t) {
- __damon_pa_check_access(ctx, r);
+ __damon_pa_check_access(r);
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
}
}
@@ -233,7 +231,8 @@ static unsigned long damon_pa_pageout(struct damon_region *r)
return applied * PAGE_SIZE;
}
-static unsigned long damon_pa_mark_accessed(struct damon_region *r)
+static inline unsigned long damon_pa_mark_accessed_or_deactivate(
+ struct damon_region *r, bool mark_accessed)
{
unsigned long addr, applied = 0;
@@ -242,27 +241,24 @@ static unsigned long damon_pa_mark_accessed(struct damon_region *r)
if (!page)
continue;
- mark_page_accessed(page);
+ if (mark_accessed)
+ mark_page_accessed(page);
+ else
+ deactivate_page(page);
put_page(page);
applied++;
}
return applied * PAGE_SIZE;
}
-static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
+static unsigned long damon_pa_mark_accessed(struct damon_region *r)
{
- unsigned long addr, applied = 0;
-
- for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
- struct page *page = damon_get_page(PHYS_PFN(addr));
+ return damon_pa_mark_accessed_or_deactivate(r, true);
+}
- if (!page)
- continue;
- deactivate_page(page);
- put_page(page);
- applied++;
- }
- return applied * PAGE_SIZE;
+static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
+{
+ return damon_pa_mark_accessed_or_deactivate(r, false);
}
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
@@ -276,7 +272,10 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
return damon_pa_mark_accessed(r);
case DAMOS_LRU_DEPRIO:
return damon_pa_deactivate_pages(r);
+ case DAMOS_STAT:
+ break;
default:
+		/* DAMOS actions that are not yet supported by 'paddr'. */
break;
}
return 0;
@@ -288,11 +287,11 @@ static int damon_pa_scheme_score(struct damon_ctx *context,
{
switch (scheme->action) {
case DAMOS_PAGEOUT:
- return damon_pageout_score(context, r, scheme);
+ return damon_cold_score(context, r, scheme);
case DAMOS_LRU_PRIO:
return damon_hot_score(context, r, scheme);
case DAMOS_LRU_DEPRIO:
- return damon_pageout_score(context, r, scheme);
+ return damon_cold_score(context, r, scheme);
default:
break;
}
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index a7faf51b4bd4..162c9b1ca00f 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -13,6 +13,8 @@
#include <linux/sched.h>
#include <linux/workqueue.h>
+#include "modules-common.h"
+
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
@@ -50,124 +52,35 @@ module_param(commit_inputs, bool, 0600);
static unsigned long min_age __read_mostly = 120000000;
module_param(min_age, ulong, 0600);
-/*
- * Limit of time for trying the reclamation in milliseconds.
- *
- * DAMON_RECLAIM tries to use only up to this time within a time window
- * (quota_reset_interval_ms) for trying reclamation of cold pages. This can be
- * used for limiting CPU consumption of DAMON_RECLAIM. If the value is zero,
- * the limit is disabled.
- *
- * 10 ms by default.
- */
-static unsigned long quota_ms __read_mostly = 10;
-module_param(quota_ms, ulong, 0600);
-
-/*
- * Limit of size of memory for the reclamation in bytes.
- *
- * DAMON_RECLAIM charges amount of memory which it tried to reclaim within a
- * time window (quota_reset_interval_ms) and makes no more than this limit is
- * tried. This can be used for limiting consumption of CPU and IO. If this
- * value is zero, the limit is disabled.
- *
- * 128 MiB by default.
- */
-static unsigned long quota_sz __read_mostly = 128 * 1024 * 1024;
-module_param(quota_sz, ulong, 0600);
-
-/*
- * The time/size quota charge reset interval in milliseconds.
- *
- * The charge reset interval for the quota of time (quota_ms) and size
- * (quota_sz). That is, DAMON_RECLAIM does not try reclamation for more than
- * quota_ms milliseconds or quota_sz bytes within quota_reset_interval_ms
- * milliseconds.
- *
- * 1 second by default.
- */
-static unsigned long quota_reset_interval_ms __read_mostly = 1000;
-module_param(quota_reset_interval_ms, ulong, 0600);
-
-/*
- * The watermarks check time interval in microseconds.
- *
- * Minimal time to wait before checking the watermarks, when DAMON_RECLAIM is
- * enabled but inactive due to its watermarks rule. 5 seconds by default.
- */
-static unsigned long wmarks_interval __read_mostly = 5000000;
-module_param(wmarks_interval, ulong, 0600);
-
-/*
- * Free memory rate (per thousand) for the high watermark.
- *
- * If free memory of the system in bytes per thousand bytes is higher than
- * this, DAMON_RECLAIM becomes inactive, so it does nothing but periodically
- * checks the watermarks. 500 (50%) by default.
- */
-static unsigned long wmarks_high __read_mostly = 500;
-module_param(wmarks_high, ulong, 0600);
-
-/*
- * Free memory rate (per thousand) for the middle watermark.
- *
- * If free memory of the system in bytes per thousand bytes is between this and
- * the low watermark, DAMON_RECLAIM becomes active, so starts the monitoring
- * and the reclaiming. 400 (40%) by default.
- */
-static unsigned long wmarks_mid __read_mostly = 400;
-module_param(wmarks_mid, ulong, 0600);
-
-/*
- * Free memory rate (per thousand) for the low watermark.
- *
- * If free memory of the system in bytes per thousand bytes is lower than this,
- * DAMON_RECLAIM becomes inactive, so it does nothing but periodically checks
- * the watermarks. In the case, the system falls back to the LRU-based page
- * granularity reclamation logic. 200 (20%) by default.
- */
-static unsigned long wmarks_low __read_mostly = 200;
-module_param(wmarks_low, ulong, 0600);
-
-/*
- * Sampling interval for the monitoring in microseconds.
- *
- * The sampling interval of DAMON for the cold memory monitoring. Please refer
- * to the DAMON documentation for more detail. 5 ms by default.
- */
-static unsigned long sample_interval __read_mostly = 5000;
-module_param(sample_interval, ulong, 0600);
-
-/*
- * Aggregation interval for the monitoring in microseconds.
- *
- * The aggregation interval of DAMON for the cold memory monitoring. Please
- * refer to the DAMON documentation for more detail. 100 ms by default.
- */
-static unsigned long aggr_interval __read_mostly = 100000;
-module_param(aggr_interval, ulong, 0600);
-
-/*
- * Minimum number of monitoring regions.
- *
- * The minimal number of monitoring regions of DAMON for the cold memory
- * monitoring. This can be used to set lower-bound of the monitoring quality.
- * But, setting this too high could result in increased monitoring overhead.
- * Please refer to the DAMON documentation for more detail. 10 by default.
- */
-static unsigned long min_nr_regions __read_mostly = 10;
-module_param(min_nr_regions, ulong, 0600);
-
-/*
- * Maximum number of monitoring regions.
- *
- * The maximum number of monitoring regions of DAMON for the cold memory
- * monitoring. This can be used to set upper-bound of the monitoring overhead.
- * However, setting this too low could result in bad monitoring quality.
- * Please refer to the DAMON documentation for more detail. 1000 by default.
- */
-static unsigned long max_nr_regions __read_mostly = 1000;
-module_param(max_nr_regions, ulong, 0600);
+static struct damos_quota damon_reclaim_quota = {
+	/* use up to 10 ms of time, reclaim up to 128 MiB per 1 sec by default */
+ .ms = 10,
+ .sz = 128 * 1024 * 1024,
+ .reset_interval = 1000,
+ /* Within the quota, page out older regions first. */
+ .weight_sz = 0,
+ .weight_nr_accesses = 0,
+ .weight_age = 1
+};
+DEFINE_DAMON_MODULES_DAMOS_QUOTAS(damon_reclaim_quota);
+
+static struct damos_watermarks damon_reclaim_wmarks = {
+ .metric = DAMOS_WMARK_FREE_MEM_RATE,
+ .interval = 5000000, /* 5 seconds */
+ .high = 500, /* 50 percent */
+ .mid = 400, /* 40 percent */
+ .low = 200, /* 20 percent */
+};
+DEFINE_DAMON_MODULES_WMARKS_PARAMS(damon_reclaim_wmarks);
+
+static struct damon_attrs damon_reclaim_mon_attrs = {
+ .sample_interval = 5000, /* 5 ms */
+ .aggr_interval = 100000, /* 100 ms */
+ .ops_update_interval = 0,
+ .min_nr_regions = 10,
+ .max_nr_regions = 1000,
+};
+DEFINE_DAMON_MODULES_MON_ATTRS_PARAMS(damon_reclaim_mon_attrs);
/*
* Start of the target memory region in physical address.
@@ -196,119 +109,44 @@ module_param(monitor_region_end, ulong, 0600);
static int kdamond_pid __read_mostly = -1;
module_param(kdamond_pid, int, 0400);
-/*
- * Number of memory regions that tried to be reclaimed.
- */
-static unsigned long nr_reclaim_tried_regions __read_mostly;
-module_param(nr_reclaim_tried_regions, ulong, 0400);
-
-/*
- * Total bytes of memory regions that tried to be reclaimed.
- */
-static unsigned long bytes_reclaim_tried_regions __read_mostly;
-module_param(bytes_reclaim_tried_regions, ulong, 0400);
-
-/*
- * Number of memory regions that successfully be reclaimed.
- */
-static unsigned long nr_reclaimed_regions __read_mostly;
-module_param(nr_reclaimed_regions, ulong, 0400);
-
-/*
- * Total bytes of memory regions that successfully be reclaimed.
- */
-static unsigned long bytes_reclaimed_regions __read_mostly;
-module_param(bytes_reclaimed_regions, ulong, 0400);
-
-/*
- * Number of times that the time/space quota limits have exceeded
- */
-static unsigned long nr_quota_exceeds __read_mostly;
-module_param(nr_quota_exceeds, ulong, 0400);
+static struct damos_stat damon_reclaim_stat;
+DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_reclaim_stat,
+ reclaim_tried_regions, reclaimed_regions, quota_exceeds);
static struct damon_ctx *ctx;
static struct damon_target *target;
-struct damon_reclaim_ram_walk_arg {
- unsigned long start;
- unsigned long end;
-};
-
-static int walk_system_ram(struct resource *res, void *arg)
-{
- struct damon_reclaim_ram_walk_arg *a = arg;
-
- if (a->end - a->start < resource_size(res)) {
- a->start = res->start;
- a->end = res->end;
- }
- return 0;
-}
-
-/*
- * Find biggest 'System RAM' resource and store its start and end address in
- * @start and @end, respectively. If no System RAM is found, returns false.
- */
-static bool get_monitoring_region(unsigned long *start, unsigned long *end)
-{
- struct damon_reclaim_ram_walk_arg arg = {};
-
- walk_system_ram_res(0, ULONG_MAX, &arg, walk_system_ram);
- if (arg.end <= arg.start)
- return false;
-
- *start = arg.start;
- *end = arg.end;
- return true;
-}
-
static struct damos *damon_reclaim_new_scheme(void)
{
- struct damos_watermarks wmarks = {
- .metric = DAMOS_WMARK_FREE_MEM_RATE,
- .interval = wmarks_interval,
- .high = wmarks_high,
- .mid = wmarks_mid,
- .low = wmarks_low,
- };
- struct damos_quota quota = {
- /*
- * Do not try reclamation for more than quota_ms milliseconds
- * or quota_sz bytes within quota_reset_interval_ms.
- */
- .ms = quota_ms,
- .sz = quota_sz,
- .reset_interval = quota_reset_interval_ms,
- /* Within the quota, page out older regions first. */
- .weight_sz = 0,
- .weight_nr_accesses = 0,
- .weight_age = 1
+ struct damos_access_pattern pattern = {
+ /* Find regions having PAGE_SIZE or larger size */
+ .min_sz_region = PAGE_SIZE,
+ .max_sz_region = ULONG_MAX,
+ /* and not accessed at all */
+ .min_nr_accesses = 0,
+ .max_nr_accesses = 0,
+ /* for min_age or more micro-seconds */
+ .min_age_region = min_age /
+ damon_reclaim_mon_attrs.aggr_interval,
+ .max_age_region = UINT_MAX,
};
- struct damos *scheme = damon_new_scheme(
- /* Find regions having PAGE_SIZE or larger size */
- PAGE_SIZE, ULONG_MAX,
- /* and not accessed at all */
- 0, 0,
- /* for min_age or more micro-seconds, and */
- min_age / aggr_interval, UINT_MAX,
+
+ return damon_new_scheme(
+ &pattern,
/* page out those, as soon as found */
DAMOS_PAGEOUT,
/* under the quota. */
- &quota,
+ &damon_reclaim_quota,
/* (De)activate this according to the watermarks. */
- &wmarks);
-
- return scheme;
+ &damon_reclaim_wmarks);
}
static int damon_reclaim_apply_parameters(void)
{
struct damos *scheme;
- struct damon_addr_range addr_range;
int err = 0;
- err = damon_set_attrs(ctx, sample_interval, aggr_interval, 0,
- min_nr_regions, max_nr_regions);
+ err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
if (err)
return err;
@@ -316,19 +154,11 @@ static int damon_reclaim_apply_parameters(void)
scheme = damon_reclaim_new_scheme();
if (!scheme)
return -ENOMEM;
- err = damon_set_schemes(ctx, &scheme, 1);
- if (err)
- return err;
+ damon_set_schemes(ctx, &scheme, 1);
- if (monitor_region_start > monitor_region_end)
- return -EINVAL;
- if (!monitor_region_start && !monitor_region_end &&
- !get_monitoring_region(&monitor_region_start,
- &monitor_region_end))
- return -EINVAL;
- addr_range.start = monitor_region_start;
- addr_range.end = monitor_region_end;
- return damon_set_regions(target, &addr_range, 1);
+ return damon_set_region_biggest_system_ram_default(target,
+ &monitor_region_start,
+ &monitor_region_end);
}
static int damon_reclaim_turn(bool on)
@@ -413,13 +243,8 @@ static int damon_reclaim_after_aggregation(struct damon_ctx *c)
struct damos *s;
/* update the stats parameter */
- damon_for_each_scheme(s, c) {
- nr_reclaim_tried_regions = s->stat.nr_tried;
- bytes_reclaim_tried_regions = s->stat.sz_tried;
- nr_reclaimed_regions = s->stat.nr_applied;
- bytes_reclaimed_regions = s->stat.sz_applied;
- nr_quota_exceeds = s->stat.qt_exceeds;
- }
+ damon_for_each_scheme(s, c)
+ damon_reclaim_stat = s->stat;
return damon_reclaim_handle_commit_inputs();
}
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index bdef9682d0a0..9f1219a67e3f 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -58,7 +58,7 @@ static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
err = kstrtoul(buf, 0, &min);
if (err)
- return -EINVAL;
+ return err;
range->min = min;
return count;
@@ -83,7 +83,7 @@ static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
err = kstrtoul(buf, 0, &max);
if (err)
- return -EINVAL;
+ return err;
range->max = max;
return count;
@@ -291,9 +291,7 @@ static ssize_t interval_us_store(struct kobject *kobj,
struct damon_sysfs_watermarks, kobj);
int err = kstrtoul(buf, 0, &watermarks->interval_us);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static ssize_t high_show(struct kobject *kobj,
@@ -312,9 +310,7 @@ static ssize_t high_store(struct kobject *kobj,
struct damon_sysfs_watermarks, kobj);
int err = kstrtoul(buf, 0, &watermarks->high);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static ssize_t mid_show(struct kobject *kobj,
@@ -333,9 +329,7 @@ static ssize_t mid_store(struct kobject *kobj,
struct damon_sysfs_watermarks, kobj);
int err = kstrtoul(buf, 0, &watermarks->mid);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static ssize_t low_show(struct kobject *kobj,
@@ -354,9 +348,7 @@ static ssize_t low_store(struct kobject *kobj,
struct damon_sysfs_watermarks, kobj);
int err = kstrtoul(buf, 0, &watermarks->low);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static void damon_sysfs_watermarks_release(struct kobject *kobj)
@@ -437,9 +429,7 @@ static ssize_t sz_permil_store(struct kobject *kobj,
struct damon_sysfs_weights, kobj);
int err = kstrtouint(buf, 0, &weights->sz);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static ssize_t nr_accesses_permil_show(struct kobject *kobj,
@@ -458,9 +448,7 @@ static ssize_t nr_accesses_permil_store(struct kobject *kobj,
struct damon_sysfs_weights, kobj);
int err = kstrtouint(buf, 0, &weights->nr_accesses);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static ssize_t age_permil_show(struct kobject *kobj,
@@ -479,9 +467,7 @@ static ssize_t age_permil_store(struct kobject *kobj,
struct damon_sysfs_weights, kobj);
int err = kstrtouint(buf, 0, &weights->age);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static void damon_sysfs_weights_release(struct kobject *kobj)
@@ -1031,8 +1017,7 @@ static ssize_t nr_schemes_show(struct kobject *kobj,
static ssize_t nr_schemes_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
- struct damon_sysfs_schemes *schemes = container_of(kobj,
- struct damon_sysfs_schemes, kobj);
+ struct damon_sysfs_schemes *schemes;
int nr, err = kstrtoint(buf, 0, &nr);
if (err)
@@ -1040,6 +1025,8 @@ static ssize_t nr_schemes_store(struct kobject *kobj,
if (nr < 0)
return -EINVAL;
+ schemes = container_of(kobj, struct damon_sysfs_schemes, kobj);
+
if (!mutex_trylock(&damon_sysfs_lock))
return -EBUSY;
err = damon_sysfs_schemes_add_dirs(schemes, nr);
@@ -1110,9 +1097,7 @@ static ssize_t start_store(struct kobject *kobj, struct kobj_attribute *attr,
struct damon_sysfs_region, kobj);
int err = kstrtoul(buf, 0, &region->start);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static ssize_t end_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -1131,9 +1116,7 @@ static ssize_t end_store(struct kobject *kobj, struct kobj_attribute *attr,
struct damon_sysfs_region, kobj);
int err = kstrtoul(buf, 0, &region->end);
- if (err)
- return -EINVAL;
- return count;
+ return err ? err : count;
}
static void damon_sysfs_region_release(struct kobject *kobj)
@@ -1237,8 +1220,7 @@ static ssize_t nr_regions_show(struct kobject *kobj,
static ssize_t nr_regions_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
- struct damon_sysfs_regions *regions = container_of(kobj,
- struct damon_sysfs_regions, kobj);
+ struct damon_sysfs_regions *regions;
int nr, err = kstrtoint(buf, 0, &nr);
if (err)
@@ -1246,6 +1228,8 @@ static ssize_t nr_regions_store(struct kobject *kobj,
if (nr < 0)
return -EINVAL;
+ regions = container_of(kobj, struct damon_sysfs_regions, kobj);
+
if (!mutex_trylock(&damon_sysfs_lock))
return -EBUSY;
err = damon_sysfs_regions_add_dirs(regions, nr);
@@ -1440,8 +1424,7 @@ static ssize_t nr_targets_show(struct kobject *kobj,
static ssize_t nr_targets_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
- struct damon_sysfs_targets *targets = container_of(kobj,
- struct damon_sysfs_targets, kobj);
+ struct damon_sysfs_targets *targets;
int nr, err = kstrtoint(buf, 0, &nr);
if (err)
@@ -1449,6 +1432,8 @@ static ssize_t nr_targets_store(struct kobject *kobj,
if (nr < 0)
return -EINVAL;
+ targets = container_of(kobj, struct damon_sysfs_targets, kobj);
+
if (!mutex_trylock(&damon_sysfs_lock))
return -EBUSY;
err = damon_sysfs_targets_add_dirs(targets, nr);
@@ -1525,7 +1510,7 @@ static ssize_t sample_us_store(struct kobject *kobj,
int err = kstrtoul(buf, 0, &us);
if (err)
- return -EINVAL;
+ return err;
intervals->sample_us = us;
return count;
@@ -1549,7 +1534,7 @@ static ssize_t aggr_us_store(struct kobject *kobj, struct kobj_attribute *attr,
int err = kstrtoul(buf, 0, &us);
if (err)
- return -EINVAL;
+ return err;
intervals->aggr_us = us;
return count;
@@ -1573,7 +1558,7 @@ static ssize_t update_us_store(struct kobject *kobj,
int err = kstrtoul(buf, 0, &us);
if (err)
- return -EINVAL;
+ return err;
intervals->update_us = us;
return count;
@@ -1962,8 +1947,7 @@ static ssize_t nr_contexts_show(struct kobject *kobj,
static ssize_t nr_contexts_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
- struct damon_sysfs_contexts *contexts = container_of(kobj,
- struct damon_sysfs_contexts, kobj);
+ struct damon_sysfs_contexts *contexts;
int nr, err;
err = kstrtoint(buf, 0, &nr);
@@ -1973,6 +1957,7 @@ static ssize_t nr_contexts_store(struct kobject *kobj,
if (nr < 0 || 1 < nr)
return -EINVAL;
+ contexts = container_of(kobj, struct damon_sysfs_contexts, kobj);
if (!mutex_trylock(&damon_sysfs_lock))
return -EBUSY;
err = damon_sysfs_contexts_add_dirs(contexts, nr);
@@ -2127,18 +2112,23 @@ static int damon_sysfs_set_attrs(struct damon_ctx *ctx,
struct damon_sysfs_intervals *sys_intervals = sys_attrs->intervals;
struct damon_sysfs_ul_range *sys_nr_regions =
sys_attrs->nr_regions_range;
-
- return damon_set_attrs(ctx, sys_intervals->sample_us,
- sys_intervals->aggr_us, sys_intervals->update_us,
- sys_nr_regions->min, sys_nr_regions->max);
+ struct damon_attrs attrs = {
+ .sample_interval = sys_intervals->sample_us,
+ .aggr_interval = sys_intervals->aggr_us,
+ .ops_update_interval = sys_intervals->update_us,
+ .min_nr_regions = sys_nr_regions->min,
+ .max_nr_regions = sys_nr_regions->max,
+ };
+ return damon_set_attrs(ctx, &attrs);
}
static void damon_sysfs_destroy_targets(struct damon_ctx *ctx)
{
struct damon_target *t, *next;
+ bool has_pid = damon_target_has_pid(ctx);
damon_for_each_target_safe(t, next, ctx) {
- if (damon_target_has_pid(ctx))
+ if (has_pid)
put_pid(t->pid);
damon_destroy_target(t);
}
@@ -2259,11 +2249,20 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
static struct damos *damon_sysfs_mk_scheme(
struct damon_sysfs_scheme *sysfs_scheme)
{
- struct damon_sysfs_access_pattern *pattern =
+ struct damon_sysfs_access_pattern *access_pattern =
sysfs_scheme->access_pattern;
struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
+
+ struct damos_access_pattern pattern = {
+ .min_sz_region = access_pattern->sz->min,
+ .max_sz_region = access_pattern->sz->max,
+ .min_nr_accesses = access_pattern->nr_accesses->min,
+ .max_nr_accesses = access_pattern->nr_accesses->max,
+ .min_age_region = access_pattern->age->min,
+ .max_age_region = access_pattern->age->max,
+ };
struct damos_quota quota = {
.ms = sysfs_quotas->ms,
.sz = sysfs_quotas->sz,
@@ -2280,10 +2279,8 @@ static struct damos *damon_sysfs_mk_scheme(
.low = sysfs_wmarks->low,
};
- return damon_new_scheme(pattern->sz->min, pattern->sz->max,
- pattern->nr_accesses->min, pattern->nr_accesses->max,
- pattern->age->min, pattern->age->max,
- sysfs_scheme->action, &quota, &wmarks);
+ return damon_new_scheme(&pattern, sysfs_scheme->action, &quota,
+ &wmarks);
}
static int damon_sysfs_set_schemes(struct damon_ctx *ctx,
@@ -2309,7 +2306,7 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
{
struct damon_target *t, *next;
- if (ctx->ops.id != DAMON_OPS_VADDR && ctx->ops.id != DAMON_OPS_FVADDR)
+ if (!damon_target_has_pid(ctx))
return;
mutex_lock(&ctx->kdamond_lock);
@@ -2455,8 +2452,7 @@ static int damon_sysfs_turn_damon_on(struct damon_sysfs_kdamond *kdamond)
struct damon_ctx *ctx;
int err;
- if (kdamond->damon_ctx &&
- damon_sysfs_ctx_running(kdamond->damon_ctx))
+ if (damon_sysfs_kdamond_running(kdamond))
return -EBUSY;
if (damon_sysfs_cmd_request.kdamond == kdamond)
return -EBUSY;
@@ -2579,19 +2575,16 @@ static ssize_t pid_show(struct kobject *kobj,
struct damon_sysfs_kdamond *kdamond = container_of(kobj,
struct damon_sysfs_kdamond, kobj);
struct damon_ctx *ctx;
- int pid;
+ int pid = -1;
if (!mutex_trylock(&damon_sysfs_lock))
return -EBUSY;
ctx = kdamond->damon_ctx;
- if (!ctx) {
- pid = -1;
+ if (!ctx)
goto out;
- }
+
mutex_lock(&ctx->kdamond_lock);
- if (!ctx->kdamond)
- pid = -1;
- else
+ if (ctx->kdamond)
pid = ctx->kdamond->pid;
mutex_unlock(&ctx->kdamond_lock);
out:
@@ -2657,23 +2650,18 @@ static void damon_sysfs_kdamonds_rm_dirs(struct damon_sysfs_kdamonds *kdamonds)
kdamonds->kdamonds_arr = NULL;
}
-static int damon_sysfs_nr_running_ctxs(struct damon_sysfs_kdamond **kdamonds,
+static bool damon_sysfs_kdamonds_busy(struct damon_sysfs_kdamond **kdamonds,
int nr_kdamonds)
{
- int nr_running_ctxs = 0;
int i;
for (i = 0; i < nr_kdamonds; i++) {
- struct damon_ctx *ctx = kdamonds[i]->damon_ctx;
-
- if (!ctx)
- continue;
- mutex_lock(&ctx->kdamond_lock);
- if (ctx->kdamond)
- nr_running_ctxs++;
- mutex_unlock(&ctx->kdamond_lock);
+ if (damon_sysfs_kdamond_running(kdamonds[i]) ||
+ damon_sysfs_cmd_request.kdamond == kdamonds[i])
+ return true;
}
- return nr_running_ctxs;
+
+ return false;
}
static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
@@ -2682,15 +2670,9 @@ static int damon_sysfs_kdamonds_add_dirs(struct damon_sysfs_kdamonds *kdamonds,
struct damon_sysfs_kdamond **kdamonds_arr, *kdamond;
int err, i;
- if (damon_sysfs_nr_running_ctxs(kdamonds->kdamonds_arr, kdamonds->nr))
+ if (damon_sysfs_kdamonds_busy(kdamonds->kdamonds_arr, kdamonds->nr))
return -EBUSY;
- for (i = 0; i < kdamonds->nr; i++) {
- if (damon_sysfs_cmd_request.kdamond ==
- kdamonds->kdamonds_arr[i])
- return -EBUSY;
- }
-
damon_sysfs_kdamonds_rm_dirs(kdamonds);
if (!nr_kdamonds)
return 0;
@@ -2741,8 +2723,7 @@ static ssize_t nr_kdamonds_show(struct kobject *kobj,
static ssize_t nr_kdamonds_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
- struct damon_sysfs_kdamonds *kdamonds = container_of(kobj,
- struct damon_sysfs_kdamonds, kobj);
+ struct damon_sysfs_kdamonds *kdamonds;
int nr, err;
err = kstrtoint(buf, 0, &nr);
@@ -2751,6 +2732,8 @@ static ssize_t nr_kdamonds_store(struct kobject *kobj,
if (nr < 0)
return -EINVAL;
+ kdamonds = container_of(kobj, struct damon_sysfs_kdamonds, kobj);
+
if (!mutex_trylock(&damon_sysfs_lock))
return -EBUSY;
err = damon_sysfs_kdamonds_add_dirs(kdamonds, nr);
diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
index d4f55f349100..bce37c487540 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/vaddr-test.h
@@ -14,33 +14,19 @@
#include <kunit/test.h>
-static void __link_vmas(struct vm_area_struct *vmas, ssize_t nr_vmas)
+static void __link_vmas(struct maple_tree *mt, struct vm_area_struct *vmas,
+ ssize_t nr_vmas)
{
- int i, j;
- unsigned long largest_gap, gap;
+ int i;
+ MA_STATE(mas, mt, 0, 0);
if (!nr_vmas)
return;
- for (i = 0; i < nr_vmas - 1; i++) {
- vmas[i].vm_next = &vmas[i + 1];
-
- vmas[i].vm_rb.rb_left = NULL;
- vmas[i].vm_rb.rb_right = &vmas[i + 1].vm_rb;
-
- largest_gap = 0;
- for (j = i; j < nr_vmas; j++) {
- if (j == 0)
- continue;
- gap = vmas[j].vm_start - vmas[j - 1].vm_end;
- if (gap > largest_gap)
- largest_gap = gap;
- }
- vmas[i].rb_subtree_gap = largest_gap;
- }
- vmas[i].vm_next = NULL;
- vmas[i].vm_rb.rb_right = NULL;
- vmas[i].rb_subtree_gap = 0;
+ mas_lock(&mas);
+ for (i = 0; i < nr_vmas; i++)
+ vma_mas_store(&vmas[i], &mas);
+ mas_unlock(&mas);
}
/*
@@ -72,6 +58,7 @@ static void __link_vmas(struct vm_area_struct *vmas, ssize_t nr_vmas)
*/
static void damon_test_three_regions_in_vmas(struct kunit *test)
{
+ static struct mm_struct mm;
struct damon_addr_range regions[3] = {0,};
/* 10-20-25, 200-210-220, 300-305, 307-330 */
struct vm_area_struct vmas[] = {
@@ -83,9 +70,10 @@ static void damon_test_three_regions_in_vmas(struct kunit *test)
(struct vm_area_struct) {.vm_start = 307, .vm_end = 330},
};
- __link_vmas(vmas, 6);
+ mt_init_flags(&mm.mm_mt, MM_MT_FLAGS);
+ __link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas));
- __damon_va_three_regions(&vmas[0], regions);
+ __damon_va_three_regions(&mm, regions);
KUNIT_EXPECT_EQ(test, 10ul, regions[0].start);
KUNIT_EXPECT_EQ(test, 25ul, regions[0].end);
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 3c7b9d6dca95..ea94e0b2c311 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -113,37 +113,38 @@ static unsigned long sz_range(struct damon_addr_range *r)
*
* Returns 0 if success, or negative error code otherwise.
*/
-static int __damon_va_three_regions(struct vm_area_struct *vma,
+static int __damon_va_three_regions(struct mm_struct *mm,
struct damon_addr_range regions[3])
{
- struct damon_addr_range gap = {0}, first_gap = {0}, second_gap = {0};
- struct vm_area_struct *last_vma = NULL;
- unsigned long start = 0;
- struct rb_root rbroot;
-
- /* Find two biggest gaps so that first_gap > second_gap > others */
- for (; vma; vma = vma->vm_next) {
- if (!last_vma) {
- start = vma->vm_start;
- goto next;
- }
+ struct damon_addr_range first_gap = {0}, second_gap = {0};
+ VMA_ITERATOR(vmi, mm, 0);
+ struct vm_area_struct *vma, *prev = NULL;
+ unsigned long start;
- if (vma->rb_subtree_gap <= sz_range(&second_gap)) {
- rbroot.rb_node = &vma->vm_rb;
- vma = rb_entry(rb_last(&rbroot),
- struct vm_area_struct, vm_rb);
+ /*
+ * Find the two biggest gaps so that first_gap > second_gap > others.
+ * If this is too slow, it can be optimised to examine the maple
+ * tree gaps.
+ */
+ for_each_vma(vmi, vma) {
+ unsigned long gap;
+
+ if (!prev) {
+ start = vma->vm_start;
goto next;
}
-
- gap.start = last_vma->vm_end;
- gap.end = vma->vm_start;
- if (sz_range(&gap) > sz_range(&second_gap)) {
- swap(gap, second_gap);
- if (sz_range(&second_gap) > sz_range(&first_gap))
- swap(second_gap, first_gap);
+ gap = vma->vm_start - prev->vm_end;
+
+ if (gap > sz_range(&first_gap)) {
+ second_gap = first_gap;
+ first_gap.start = prev->vm_end;
+ first_gap.end = vma->vm_start;
+ } else if (gap > sz_range(&second_gap)) {
+ second_gap.start = prev->vm_end;
+ second_gap.end = vma->vm_start;
}
next:
- last_vma = vma;
+ prev = vma;
}
if (!sz_range(&second_gap) || !sz_range(&first_gap))
@@ -159,7 +160,7 @@ next:
regions[1].start = ALIGN(first_gap.end, DAMON_MIN_REGION);
regions[1].end = ALIGN(second_gap.start, DAMON_MIN_REGION);
regions[2].start = ALIGN(second_gap.end, DAMON_MIN_REGION);
- regions[2].end = ALIGN(last_vma->vm_end, DAMON_MIN_REGION);
+ regions[2].end = ALIGN(prev->vm_end, DAMON_MIN_REGION);
return 0;
}
@@ -180,7 +181,7 @@ static int damon_va_three_regions(struct damon_target *t,
return -EINVAL;
mmap_read_lock(mm);
- rc = __damon_va_three_regions(mm->mmap, regions);
+ rc = __damon_va_three_regions(mm, regions);
mmap_read_unlock(mm);
mmput(mm);
@@ -250,8 +251,8 @@ static void __damon_va_init_regions(struct damon_ctx *ctx,
for (i = 0; i < 3; i++)
sz += regions[i].end - regions[i].start;
- if (ctx->min_nr_regions)
- sz /= ctx->min_nr_regions;
+ if (ctx->attrs.min_nr_regions)
+ sz /= ctx->attrs.min_nr_regions;
if (sz < DAMON_MIN_REGION)
sz = DAMON_MIN_REGION;
@@ -302,9 +303,14 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
pte_t *pte;
spinlock_t *ptl;
- if (pmd_huge(*pmd)) {
+ if (pmd_trans_huge(*pmd)) {
ptl = pmd_lock(walk->mm, pmd);
- if (pmd_huge(*pmd)) {
+ if (!pmd_present(*pmd)) {
+ spin_unlock(ptl);
+ return 0;
+ }
+
+ if (pmd_trans_huge(*pmd)) {
damon_pmdp_mkold(pmd, walk->mm, addr);
spin_unlock(ptl);
return 0;
@@ -391,8 +397,8 @@ static void damon_va_mkold(struct mm_struct *mm, unsigned long addr)
* Functions for the access checking of the regions
*/
-static void __damon_va_prepare_access_check(struct damon_ctx *ctx,
- struct mm_struct *mm, struct damon_region *r)
+static void __damon_va_prepare_access_check(struct mm_struct *mm,
+ struct damon_region *r)
{
r->sampling_addr = damon_rand(r->ar.start, r->ar.end);
@@ -410,7 +416,7 @@ static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
if (!mm)
continue;
damon_for_each_region(r, t)
- __damon_va_prepare_access_check(ctx, mm, r);
+ __damon_va_prepare_access_check(mm, r);
mmput(mm);
}
}
@@ -429,9 +435,14 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
struct damon_young_walk_private *priv = walk->private;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (pmd_huge(*pmd)) {
+ if (pmd_trans_huge(*pmd)) {
ptl = pmd_lock(walk->mm, pmd);
- if (!pmd_huge(*pmd)) {
+ if (!pmd_present(*pmd)) {
+ spin_unlock(ptl);
+ return 0;
+ }
+
+ if (!pmd_trans_huge(*pmd)) {
spin_unlock(ptl);
goto regular_page;
}
@@ -532,16 +543,15 @@ static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
* mm 'mm_struct' for the given virtual address space
* r the region to be checked
*/
-static void __damon_va_check_access(struct damon_ctx *ctx,
- struct mm_struct *mm, struct damon_region *r)
+static void __damon_va_check_access(struct mm_struct *mm,
+ struct damon_region *r, bool same_target)
{
- static struct mm_struct *last_mm;
static unsigned long last_addr;
static unsigned long last_page_sz = PAGE_SIZE;
static bool last_accessed;
/* If the region is in the last checked page, reuse the result */
- if (mm == last_mm && (ALIGN_DOWN(last_addr, last_page_sz) ==
+ if (same_target && (ALIGN_DOWN(last_addr, last_page_sz) ==
ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
if (last_accessed)
r->nr_accesses++;
@@ -552,7 +562,6 @@ static void __damon_va_check_access(struct damon_ctx *ctx,
if (last_accessed)
r->nr_accesses++;
- last_mm = mm;
last_addr = r->sampling_addr;
}
@@ -562,14 +571,17 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
struct mm_struct *mm;
struct damon_region *r;
unsigned int max_nr_accesses = 0;
+ bool same_target;
damon_for_each_target(t, ctx) {
mm = damon_get_mm(t);
if (!mm)
continue;
+ same_target = false;
damon_for_each_region(r, t) {
- __damon_va_check_access(ctx, mm, r);
+ __damon_va_check_access(mm, r, same_target);
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
+ same_target = true;
}
mmput(mm);
}
@@ -581,9 +593,8 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
* Functions for the target validity check and cleanup
*/
-static bool damon_va_target_valid(void *target)
+static bool damon_va_target_valid(struct damon_target *t)
{
- struct damon_target *t = target;
struct task_struct *task;
task = damon_get_task_struct(t);
@@ -646,6 +657,9 @@ static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
case DAMOS_STAT:
return 0;
default:
+ /*
+ * DAMOS actions that are not yet supported by 'vaddr'.
+ */
return 0;
}
@@ -659,7 +673,7 @@ static int damon_va_scheme_score(struct damon_ctx *context,
switch (scheme->action) {
case DAMOS_PAGEOUT:
- return damon_pageout_score(context, r, scheme);
+ return damon_cold_score(context, r, scheme);
default:
break;
}
diff --git a/mm/debug.c b/mm/debug.c
index bef329bf28f0..0fd15ba70d16 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -139,13 +139,11 @@ EXPORT_SYMBOL(dump_page);
void dump_vma(const struct vm_area_struct *vma)
{
- pr_emerg("vma %px start %px end %px\n"
- "next %px prev %px mm %px\n"
+ pr_emerg("vma %px start %px end %px mm %px\n"
"prot %lx anon_vma %px vm_ops %px\n"
"pgoff %lx file %px private_data %px\n"
"flags: %#lx(%pGv)\n",
- vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
- vma->vm_prev, vma->vm_mm,
+ vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm,
(unsigned long)pgprot_val(vma->vm_page_prot),
vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
vma->vm_file, vma->vm_private_data,
@@ -155,11 +153,11 @@ EXPORT_SYMBOL(dump_vma);
void dump_mm(const struct mm_struct *mm)
{
- pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
+ pr_emerg("mm %px task_size %lu\n"
#ifdef CONFIG_MMU
"get_unmapped_area %px\n"
#endif
- "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
+ "mmap_base %lu mmap_legacy_base %lu\n"
"pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n"
"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
"pinned_vm %llx data_vm %lx exec_vm %lx stack_vm %lx\n"
@@ -183,11 +181,11 @@ void dump_mm(const struct mm_struct *mm)
"tlb_flush_pending %d\n"
"def_flags: %#lx(%pGv)\n",
- mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
+ mm, mm->task_size,
#ifdef CONFIG_MMU
mm->get_unmapped_area,
#endif
- mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
+ mm->mmap_base, mm->mmap_legacy_base,
mm->pgd, atomic_read(&mm->mm_users),
atomic_read(&mm->mm_count),
mm_pgtables_bytes(mm),
diff --git a/mm/filemap.c b/mm/filemap.c
index c943d1b90cc2..08341616ae7a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -632,22 +632,23 @@ bool filemap_range_has_writeback(struct address_space *mapping,
{
XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
pgoff_t max = end_byte >> PAGE_SHIFT;
- struct page *page;
+ struct folio *folio;
if (end_byte < start_byte)
return false;
rcu_read_lock();
- xas_for_each(&xas, page, max) {
- if (xas_retry(&xas, page))
+ xas_for_each(&xas, folio, max) {
+ if (xas_retry(&xas, folio))
continue;
- if (xa_is_value(page))
+ if (xa_is_value(folio))
continue;
- if (PageDirty(page) || PageLocked(page) || PageWriteback(page))
+ if (folio_test_dirty(folio) || folio_test_locked(folio) ||
+ folio_test_writeback(folio))
break;
}
rcu_read_unlock();
- return page != NULL;
+ return folio != NULL;
}
EXPORT_SYMBOL_GPL(filemap_range_has_writeback);
@@ -1221,15 +1222,12 @@ static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
struct wait_page_queue wait_page;
wait_queue_entry_t *wait = &wait_page.wait;
bool thrashing = false;
- bool delayacct = false;
unsigned long pflags;
+ bool in_thrashing;
if (bit_nr == PG_locked &&
!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
- if (!folio_test_swapbacked(folio)) {
- delayacct_thrashing_start();
- delayacct = true;
- }
+ delayacct_thrashing_start(&in_thrashing);
psi_memstall_enter(&pflags);
thrashing = true;
}
@@ -1329,8 +1327,7 @@ repeat:
finish_wait(q, wait);
if (thrashing) {
- if (delayacct)
- delayacct_thrashing_end();
+ delayacct_thrashing_end(&in_thrashing);
psi_memstall_leave(&pflags);
}
@@ -1378,17 +1375,14 @@ void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
struct wait_page_queue wait_page;
wait_queue_entry_t *wait = &wait_page.wait;
bool thrashing = false;
- bool delayacct = false;
unsigned long pflags;
+ bool in_thrashing;
wait_queue_head_t *q;
struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
q = folio_waitqueue(folio);
if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
- if (!folio_test_swapbacked(folio)) {
- delayacct_thrashing_start();
- delayacct = true;
- }
+ delayacct_thrashing_start(&in_thrashing);
psi_memstall_enter(&pflags);
thrashing = true;
}
@@ -1435,8 +1429,7 @@ void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
finish_wait(q, wait);
if (thrashing) {
- if (delayacct)
- delayacct_thrashing_end();
+ delayacct_thrashing_end(&in_thrashing);
psi_memstall_leave(&pflags);
}
}
@@ -1467,7 +1460,7 @@ EXPORT_SYMBOL(folio_wait_bit_killable);
*
* Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
*/
-int folio_put_wait_locked(struct folio *folio, int state)
+static int folio_put_wait_locked(struct folio *folio, int state)
{
return folio_wait_bit_common(folio, PG_locked, state, DROP);
}
@@ -1633,24 +1626,26 @@ EXPORT_SYMBOL(folio_end_writeback);
*/
void page_endio(struct page *page, bool is_write, int err)
{
+ struct folio *folio = page_folio(page);
+
if (!is_write) {
if (!err) {
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
} else {
- ClearPageUptodate(page);
- SetPageError(page);
+ folio_clear_uptodate(folio);
+ folio_set_error(folio);
}
- unlock_page(page);
+ folio_unlock(folio);
} else {
if (err) {
struct address_space *mapping;
- SetPageError(page);
- mapping = page_mapping(page);
+ folio_set_error(folio);
+ mapping = folio_mapping(folio);
if (mapping)
mapping_set_error(mapping, err);
}
- end_page_writeback(page);
+ folio_end_writeback(folio);
}
}
EXPORT_SYMBOL_GPL(page_endio);
@@ -2195,30 +2190,31 @@ bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
}
/**
- * find_get_pages_contig - gang contiguous pagecache lookup
+ * filemap_get_folios_contig - Get a batch of contiguous folios
* @mapping: The address_space to search
- * @index: The starting page index
- * @nr_pages: The maximum number of pages
- * @pages: Where the resulting pages are placed
+ * @start: The starting page index
+ * @end: The final page index (inclusive)
+ * @fbatch: The batch to fill
*
- * find_get_pages_contig() works exactly like find_get_pages_range(),
- * except that the returned number of pages are guaranteed to be
- * contiguous.
+ * filemap_get_folios_contig() works exactly like filemap_get_folios(),
+ * except the returned folios are guaranteed to be contiguous. This may
+ * not return all contiguous folios if the batch gets filled up.
*
- * Return: the number of pages which were found.
+ * Return: The number of folios found.
+ * Also update @start to be positioned for traversal of the next folio.
*/
-unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
- unsigned int nr_pages, struct page **pages)
+
+unsigned filemap_get_folios_contig(struct address_space *mapping,
+ pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
{
- XA_STATE(xas, &mapping->i_pages, index);
+ XA_STATE(xas, &mapping->i_pages, *start);
+ unsigned long nr;
struct folio *folio;
- unsigned int ret = 0;
-
- if (unlikely(!nr_pages))
- return 0;
rcu_read_lock();
- for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
+
+ for (folio = xas_load(&xas); folio && xas.xa_index <= end;
+ folio = xas_next(&xas)) {
if (xas_retry(&xas, folio))
continue;
/*
@@ -2226,33 +2222,45 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
* No current caller is looking for DAX entries.
*/
if (xa_is_value(folio))
- break;
+ goto update_start;
if (!folio_try_get_rcu(folio))
goto retry;
if (unlikely(folio != xas_reload(&xas)))
- goto put_page;
+ goto put_folio;
-again:
- pages[ret] = folio_file_page(folio, xas.xa_index);
- if (++ret == nr_pages)
- break;
- if (folio_more_pages(folio, xas.xa_index, ULONG_MAX)) {
- xas.xa_index++;
- folio_ref_inc(folio);
- goto again;
+ if (!folio_batch_add(fbatch, folio)) {
+ nr = folio_nr_pages(folio);
+
+ if (folio_test_hugetlb(folio))
+ nr = 1;
+ *start = folio->index + nr;
+ goto out;
}
continue;
-put_page:
+put_folio:
folio_put(folio);
+
retry:
xas_reset(&xas);
}
+
+update_start:
+ nr = folio_batch_count(fbatch);
+
+ if (nr) {
+ folio = fbatch->folios[nr - 1];
+ if (folio_test_hugetlb(folio))
+ *start = folio->index + 1;
+ else
+ *start = folio->index + folio_nr_pages(folio);
+ }
+out:
rcu_read_unlock();
- return ret;
+ return folio_batch_count(fbatch);
}
-EXPORT_SYMBOL(find_get_pages_contig);
+EXPORT_SYMBOL(filemap_get_folios_contig);
/**
* find_get_pages_range_tag - Find and return head pages matching @tag.
@@ -3719,7 +3727,7 @@ ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
unsigned long offset; /* Offset into pagecache page */
unsigned long bytes; /* Bytes to write to page */
size_t copied; /* Bytes copied from user */
- void *fsdata;
+ void *fsdata = NULL;
offset = (pos & (PAGE_SIZE - 1));
bytes = min_t(unsigned long, PAGE_SIZE - offset,
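The filemap.c hunks above replace find_get_pages_contig() with the folio-based filemap_get_folios_contig(), which fills a folio_batch and advances *start past the last folio it returned. A hedged caller-side sketch (the helper name and its purpose are illustrative only, not part of the patch):

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Count the bytes of the contiguous page cache run starting at @index. */
static unsigned long count_contig_bytes(struct address_space *mapping,
					pgoff_t index, pgoff_t end_index)
{
	struct folio_batch fbatch;
	pgoff_t start = index;
	unsigned long bytes = 0;
	unsigned int i, found;

	folio_batch_init(&fbatch);
	/* Stops at the first hole, or once end_index has been passed. */
	while ((found = filemap_get_folios_contig(mapping, &start, end_index,
						  &fbatch))) {
		for (i = 0; i < found; i++)
			bytes += folio_size(fbatch.folios[i]);
		folio_batch_release(&fbatch);
	}
	return bytes;
}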
diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 458618c7302c..e1e23b4947d7 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -88,6 +88,12 @@ void lru_cache_add(struct page *page)
}
EXPORT_SYMBOL(lru_cache_add);
+void lru_cache_add_inactive_or_unevictable(struct page *page,
+ struct vm_area_struct *vma)
+{
+ folio_add_lru_vma(page_folio(page), vma);
+}
+
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp)
{
diff --git a/mm/gup.c b/mm/gup.c
index 00926abb4426..ce00a4c40da8 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -158,6 +158,13 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
else
folio_ref_add(folio,
refs * (GUP_PIN_COUNTING_BIAS - 1));
+ /*
+ * Adjust the pincount before re-checking the PTE for changes.
+ * This is essentially a smp_mb() and is paired with a memory
+ * barrier in page_try_share_anon_rmap().
+ */
+ smp_mb__after_atomic();
+
node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs);
return folio;
@@ -554,7 +561,7 @@ retry:
migration_entry_wait(mm, pmd, address);
goto retry;
}
- if ((flags & FOLL_NUMA) && pte_protnone(pte))
+ if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
goto no_page;
page = vm_normal_page(vma, address, pte);
@@ -707,7 +714,7 @@ retry:
if (likely(!pmd_trans_huge(pmdval)))
return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
- if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
+ if (pmd_protnone(pmdval) && !gup_can_follow_protnone(flags))
return no_page_table(vma, flags);
retry_locked:
@@ -1153,14 +1160,6 @@ static long __get_user_pages(struct mm_struct *mm,
VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
- /*
- * If FOLL_FORCE is set then do not force a full fault as the hinting
- * fault information is unrelated to the reference behaviour of a task
- * using the address space
- */
- if (!(gup_flags & FOLL_FORCE))
- gup_flags |= FOLL_NUMA;
-
do {
struct page *page;
unsigned int foll_flags = gup_flags;
@@ -1667,10 +1666,11 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
if (!locked) {
locked = 1;
mmap_read_lock(mm);
- vma = find_vma(mm, nstart);
+ vma = find_vma_intersection(mm, nstart, end);
} else if (nstart >= vma->vm_end)
- vma = vma->vm_next;
- if (!vma || vma->vm_start >= end)
+ vma = find_vma_intersection(mm, vma->vm_end, end);
+
+ if (!vma)
break;
/*
* Set [nstart; nend) to intersection of desired address
@@ -1927,20 +1927,16 @@ struct page *get_dump_page(unsigned long addr)
#ifdef CONFIG_MIGRATION
/*
- * Check whether all pages are pinnable, if so return number of pages. If some
- * pages are not pinnable, migrate them, and unpin all pages. Return zero if
- * pages were migrated, or if some pages were not successfully isolated.
- * Return negative error if migration fails.
+ * Returns the number of collected pages. Return value is always >= 0.
*/
-static long check_and_migrate_movable_pages(unsigned long nr_pages,
- struct page **pages,
- unsigned int gup_flags)
+static unsigned long collect_longterm_unpinnable_pages(
+ struct list_head *movable_page_list,
+ unsigned long nr_pages,
+ struct page **pages)
{
- unsigned long isolation_error_count = 0, i;
+ unsigned long i, collected = 0;
struct folio *prev_folio = NULL;
- LIST_HEAD(movable_page_list);
- bool drain_allow = true, coherent_pages = false;
- int ret = 0;
+ bool drain_allow = true;
for (i = 0; i < nr_pages; i++) {
struct folio *folio = page_folio(pages[i]);
@@ -1949,45 +1945,16 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
continue;
prev_folio = folio;
- /*
- * Device coherent pages are managed by a driver and should not
- * be pinned indefinitely as it prevents the driver moving the
- * page. So when trying to pin with FOLL_LONGTERM instead try
- * to migrate the page out of device memory.
- */
- if (folio_is_device_coherent(folio)) {
- /*
- * We always want a new GUP lookup with device coherent
- * pages.
- */
- pages[i] = 0;
- coherent_pages = true;
-
- /*
- * Migration will fail if the page is pinned, so convert
- * the pin on the source page to a normal reference.
- */
- if (gup_flags & FOLL_PIN) {
- get_page(&folio->page);
- unpin_user_page(&folio->page);
- }
+ if (folio_is_longterm_pinnable(folio))
+ continue;
- ret = migrate_device_coherent_page(&folio->page);
- if (ret)
- goto unpin_pages;
+ collected++;
+ if (folio_is_device_coherent(folio))
continue;
- }
- if (folio_is_longterm_pinnable(folio))
- continue;
- /*
- * Try to move out any movable page before pinning the range.
- */
if (folio_test_hugetlb(folio)) {
- if (isolate_hugetlb(&folio->page,
- &movable_page_list))
- isolation_error_count++;
+ isolate_hugetlb(&folio->page, movable_page_list);
continue;
}
@@ -1996,63 +1963,124 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
drain_allow = false;
}
- if (folio_isolate_lru(folio)) {
- isolation_error_count++;
+ if (!folio_isolate_lru(folio))
continue;
- }
- list_add_tail(&folio->lru, &movable_page_list);
+
+ list_add_tail(&folio->lru, movable_page_list);
node_stat_mod_folio(folio,
NR_ISOLATED_ANON + folio_is_file_lru(folio),
folio_nr_pages(folio));
}
- if (!list_empty(&movable_page_list) || isolation_error_count ||
- coherent_pages)
- goto unpin_pages;
+ return collected;
+}
- /*
- * If list is empty, and no isolation errors, means that all pages are
- * in the correct zone.
- */
- return nr_pages;
+/*
+ * Unpins all pages and migrates device coherent pages and movable_page_list.
+ * Returns -EAGAIN if all pages were successfully migrated or -errno for failure
+ * (or partial success).
+ */
+static int migrate_longterm_unpinnable_pages(
+ struct list_head *movable_page_list,
+ unsigned long nr_pages,
+ struct page **pages)
+{
+ int ret;
+ unsigned long i;
-unpin_pages:
- /*
- * pages[i] might be NULL if any device coherent pages were found.
- */
for (i = 0; i < nr_pages; i++) {
- if (!pages[i])
+ struct folio *folio = page_folio(pages[i]);
+
+ if (folio_is_device_coherent(folio)) {
+ /*
+ * Migration will fail if the page is pinned, so convert
+ * the pin on the source page to a normal reference.
+ */
+ pages[i] = NULL;
+ folio_get(folio);
+ gup_put_folio(folio, 1, FOLL_PIN);
+
+ if (migrate_device_coherent_page(&folio->page)) {
+ ret = -EBUSY;
+ goto err;
+ }
+
continue;
+ }
- if (gup_flags & FOLL_PIN)
- unpin_user_page(pages[i]);
- else
- put_page(pages[i]);
+ /*
+ * We can't migrate pages with unexpected references, so drop
+ * the reference obtained by __get_user_pages_locked().
+ * Migrating pages have been added to movable_page_list after
+ * calling folio_isolate_lru() which takes a reference so the
+ * page won't be freed if it's migrating.
+ */
+ unpin_user_page(pages[i]);
+ pages[i] = NULL;
}
- if (!list_empty(&movable_page_list)) {
+ if (!list_empty(movable_page_list)) {
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
.gfp_mask = GFP_USER | __GFP_NOWARN,
};
- ret = migrate_pages(&movable_page_list, alloc_migration_target,
- NULL, (unsigned long)&mtc, MIGRATE_SYNC,
- MR_LONGTERM_PIN, NULL);
- if (ret > 0) /* number of pages not migrated */
+ if (migrate_pages(movable_page_list, alloc_migration_target,
+ NULL, (unsigned long)&mtc, MIGRATE_SYNC,
+ MR_LONGTERM_PIN, NULL)) {
ret = -ENOMEM;
+ goto err;
+ }
}
- if (ret && !list_empty(&movable_page_list))
- putback_movable_pages(&movable_page_list);
+ putback_movable_pages(movable_page_list);
+
+ return -EAGAIN;
+
+err:
+ for (i = 0; i < nr_pages; i++)
+ if (pages[i])
+ unpin_user_page(pages[i]);
+ putback_movable_pages(movable_page_list);
+
return ret;
}
+
+/*
+ * Check whether all pages are *allowed* to be pinned. Rather confusingly, all
+ * pages in the range are required to be pinned via FOLL_PIN, before calling
+ * this routine.
+ *
+ * If any pages in the range are not allowed to be pinned, then this routine
+ * will migrate those pages away, unpin all the pages in the range and return
+ * -EAGAIN. The caller should re-pin the entire range with FOLL_PIN and then
+ * call this routine again.
+ *
+ * If an error other than -EAGAIN occurs, this indicates a migration failure.
+ * The caller should give up, and propagate the error back up the call stack.
+ *
+ * If everything is OK and all pages in the range are allowed to be pinned, then
+ * this routine leaves all pages pinned and returns zero for success.
+ */
+static long check_and_migrate_movable_pages(unsigned long nr_pages,
+ struct page **pages)
+{
+ unsigned long collected;
+ LIST_HEAD(movable_page_list);
+
+ collected = collect_longterm_unpinnable_pages(&movable_page_list,
+ nr_pages, pages);
+ if (!collected)
+ return 0;
+
+ return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages,
+ pages);
+}
#else
static long check_and_migrate_movable_pages(unsigned long nr_pages,
- struct page **pages,
- unsigned int gup_flags)
+ struct page **pages)
{
- return nr_pages;
+ return 0;
}
#endif /* CONFIG_MIGRATION */
@@ -2068,22 +2096,36 @@ static long __gup_longterm_locked(struct mm_struct *mm,
unsigned int gup_flags)
{
unsigned int flags;
- long rc;
+ long rc, nr_pinned_pages;
if (!(gup_flags & FOLL_LONGTERM))
return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
NULL, gup_flags);
+
+ /*
+ * If we get to this point then FOLL_LONGTERM is set, and FOLL_LONGTERM
+ * implies FOLL_PIN (although the reverse is not true). Therefore it is
+ * correct to unconditionally call check_and_migrate_movable_pages()
+ * which assumes pages have been pinned via FOLL_PIN.
+ *
+ * Enforce the above reasoning by asserting that FOLL_PIN is set.
+ */
+ if (WARN_ON(!(gup_flags & FOLL_PIN)))
+ return -EINVAL;
flags = memalloc_pin_save();
do {
- rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
- NULL, gup_flags);
- if (rc <= 0)
+ nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages,
+ pages, vmas, NULL,
+ gup_flags);
+ if (nr_pinned_pages <= 0) {
+ rc = nr_pinned_pages;
break;
- rc = check_and_migrate_movable_pages(rc, pages, gup_flags);
- } while (!rc);
+ }
+ rc = check_and_migrate_movable_pages(nr_pinned_pages, pages);
+ } while (rc == -EAGAIN);
memalloc_pin_restore(flags);
- return rc;
+ return rc ? rc : nr_pinned_pages;
}
static bool is_valid_gup_flags(unsigned int gup_flags)
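From a driver's point of view, the FOLL_PIN requirement asserted above is satisfied automatically by the pin_user_pages*() helpers, which set FOLL_PIN internally. A hedged example of a long-term pin (the function name, buffer address and fixed-size page array are placeholders, not part of the patch):

static int longterm_pin_example(unsigned long user_addr)
{
	struct page *pages[16];
	int nr;

	/*
	 * pin_user_pages_fast() sets FOLL_PIN internally, so this
	 * FOLL_LONGTERM request reaches the long-term pinning path with
	 * the WARN_ON() above satisfied.
	 */
	nr = pin_user_pages_fast(user_addr, ARRAY_SIZE(pages),
				 FOLL_WRITE | FOLL_LONGTERM, pages);
	if (nr <= 0)
		return nr;

	/* ... long-lived access, e.g. setting up DMA to these pages ... */

	unpin_user_pages(pages, nr);
	return 0;
}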
@@ -2378,11 +2420,7 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
struct page *page;
struct folio *folio;
- /*
- * Similar to the PMD case below, NUMA hinting must take slow
- * path using the pte_protnone check.
- */
- if (pte_protnone(pte))
+ if (pte_protnone(pte) && !gup_can_follow_protnone(flags))
goto pte_unmap;
if (!pte_access_permitted(pte, flags & FOLL_WRITE))
@@ -2766,12 +2804,8 @@ static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned lo
if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
pmd_devmap(pmd))) {
- /*
- * NUMA hinting faults need to be handled in the GUP
- * slowpath for accounting purposes and so that they
- * can be serialised against THP migration.
- */
- if (pmd_protnone(pmd))
+ if (pmd_protnone(pmd) &&
+ !gup_can_follow_protnone(flags))
return 0;
if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
diff --git a/mm/hmm.c b/mm/hmm.c
index f2aa63b94d9b..3850fb625dda 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -253,7 +253,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
cpu_flags = HMM_PFN_VALID;
if (is_writable_device_private_entry(entry))
cpu_flags |= HMM_PFN_WRITE;
- *hmm_pfn = swp_offset(entry) | cpu_flags;
+ *hmm_pfn = swp_offset_pfn(entry) | cpu_flags;
return 0;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f42bb51e023a..1cc4a5f4791e 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -36,6 +36,7 @@
#include <linux/numa.h>
#include <linux/page_owner.h>
#include <linux/sched/sysctl.h>
+#include <linux/memory-tiers.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
@@ -70,9 +71,8 @@ static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
unsigned long huge_zero_pfn __read_mostly = ~0UL;
-bool hugepage_vma_check(struct vm_area_struct *vma,
- unsigned long vm_flags,
- bool smaps, bool in_pf)
+bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
+ bool smaps, bool in_pf, bool enforce_sysfs)
{
if (!vma->vm_mm) /* vdso */
return false;
@@ -119,13 +119,12 @@ bool hugepage_vma_check(struct vm_area_struct *vma,
* own flags.
*/
if (!in_pf && shmem_file(vma->vm_file))
- return shmem_huge_enabled(vma);
+ return shmem_huge_enabled(vma, !enforce_sysfs);
- if (!hugepage_flags_enabled())
- return false;
-
- /* THP settings require madvise. */
- if (!(vm_flags & VM_HUGEPAGE) && !hugepage_flags_always())
+ /* Enforce sysfs THP requirements as necessary */
+ if (enforce_sysfs &&
+ (!hugepage_flags_enabled() || (!(vm_flags & VM_HUGEPAGE) &&
+ !hugepage_flags_always())))
return false;
/* Only regular file is valid */
@@ -164,7 +163,6 @@ retry:
count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
return false;
}
- count_vm_event(THP_ZERO_PAGE_ALLOC);
preempt_disable();
if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
preempt_enable();
@@ -176,6 +174,7 @@ retry:
/* We take additional reference here. It will be put back by shrinker */
atomic_set(&huge_zero_refcount, 2);
preempt_enable();
+ count_vm_event(THP_ZERO_PAGE_ALLOC);
return true;
}
@@ -772,8 +771,7 @@ static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
return;
entry = mk_pmd(zero_page, vma->vm_page_prot);
entry = pmd_mkhuge(entry);
- if (pgtable)
- pgtable_trans_huge_deposit(mm, pmd, pgtable);
+ pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, haddr, pmd, entry);
mm_inc_nr_ptes(mm);
}
@@ -1307,6 +1305,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
{
const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
struct page *page;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
pmd_t orig_pmd = vmf->orig_pmd;
@@ -1328,46 +1327,48 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
}
page = pmd_page(orig_pmd);
+ folio = page_folio(page);
VM_BUG_ON_PAGE(!PageHead(page), page);
/* Early check when only holding the PT lock. */
if (PageAnonExclusive(page))
goto reuse;
- if (!trylock_page(page)) {
- get_page(page);
+ if (!folio_trylock(folio)) {
+ folio_get(folio);
spin_unlock(vmf->ptl);
- lock_page(page);
+ folio_lock(folio);
spin_lock(vmf->ptl);
if (unlikely(!pmd_same(*vmf->pmd, orig_pmd))) {
spin_unlock(vmf->ptl);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return 0;
}
- put_page(page);
+ folio_put(folio);
}
/* Recheck after temporarily dropping the PT lock. */
if (PageAnonExclusive(page)) {
- unlock_page(page);
+ folio_unlock(folio);
goto reuse;
}
/*
- * See do_wp_page(): we can only reuse the page exclusively if there are
- * no additional references. Note that we always drain the LRU
- * pagevecs immediately after adding a THP.
+ * See do_wp_page(): we can only reuse the folio exclusively if
+ * there are no additional references. Note that we always drain
+ * the LRU pagevecs immediately after adding a THP.
*/
- if (page_count(page) > 1 + PageSwapCache(page) * thp_nr_pages(page))
+ if (folio_ref_count(folio) >
+ 1 + folio_test_swapcache(folio) * folio_nr_pages(folio))
goto unlock_fallback;
- if (PageSwapCache(page))
- try_to_free_swap(page);
- if (page_count(page) == 1) {
+ if (folio_test_swapcache(folio))
+ folio_free_swap(folio);
+ if (folio_ref_count(folio) == 1) {
pmd_t entry;
page_move_anon_rmap(page, vma);
- unlock_page(page);
+ folio_unlock(folio);
reuse:
if (unlikely(unshare)) {
spin_unlock(vmf->ptl);
@@ -1382,7 +1383,7 @@ reuse:
}
unlock_fallback:
- unlock_page(page);
+ folio_unlock(folio);
spin_unlock(vmf->ptl);
fallback:
__split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
@@ -1449,7 +1450,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
return ERR_PTR(-EFAULT);
/* Full NUMA hinting faults to serialise migration in fault paths */
- if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
+ if (pmd_protnone(*pmd) && !gup_can_follow_protnone(flags))
return NULL;
if (!pmd_write(*pmd) && gup_must_unshare(flags, page))
@@ -1479,7 +1480,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
struct page *page;
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
int page_nid = NUMA_NO_NODE;
- int target_nid, last_cpupid = -1;
+ int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
bool migrated = false;
bool was_writable = pmd_savedwrite(oldpmd);
int flags = 0;
@@ -1500,7 +1501,12 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
flags |= TNF_NO_GROUP;
page_nid = page_to_nid(page);
- last_cpupid = page_cpupid_last(page);
+ /*
+	 * For memory tiering mode, the cpupid of a slow memory page is used
+	 * to record the page access time, so use the default value here.
+ */
+ if (node_is_toptier(page_nid))
+ last_cpupid = page_cpupid_last(page);
target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
&flags);
@@ -1824,6 +1830,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (prot_numa) {
struct page *page;
+ bool toptier;
/*
* Avoid trapping faults against the zero page. The read-only
* data is likely to be read-cached on the local CPU and
@@ -1836,13 +1843,18 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
goto unlock;
page = pmd_page(*pmd);
+ toptier = node_is_toptier(page_to_nid(page));
/*
* Skip scanning top tier node if normal numa
* balancing is disabled
*/
if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
- node_is_toptier(page_to_nid(page)))
+ toptier)
goto unlock;
+
+ if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
+ !toptier)
+ xchg_page_access_time(page, jiffies_to_msecs(jiffies));
}
/*
* In case prot_numa, we are under mmap_read_lock(mm). It's critical
@@ -2029,7 +2041,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
pgtable_t pgtable;
pmd_t old_pmd, _pmd;
bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false;
- bool anon_exclusive = false;
+ bool anon_exclusive = false, dirty = false;
unsigned long addr;
int i;
@@ -2113,13 +2125,16 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
write = is_writable_migration_entry(entry);
if (PageAnon(page))
anon_exclusive = is_readable_exclusive_migration_entry(entry);
- young = false;
+ young = is_migration_entry_young(entry);
+ dirty = is_migration_entry_dirty(entry);
soft_dirty = pmd_swp_soft_dirty(old_pmd);
uffd_wp = pmd_swp_uffd_wp(old_pmd);
} else {
page = pmd_page(old_pmd);
- if (pmd_dirty(old_pmd))
+ if (pmd_dirty(old_pmd)) {
+ dirty = true;
SetPageDirty(page);
+ }
write = pmd_write(old_pmd);
young = pmd_young(old_pmd);
soft_dirty = pmd_soft_dirty(old_pmd);
@@ -2140,6 +2155,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
*
* In case we cannot clear PageAnonExclusive(), split the PMD
* only and let try_to_migrate_one() fail later.
+ *
+ * See page_try_share_anon_rmap(): invalidate PMD first.
*/
anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
@@ -2171,6 +2188,10 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
else
swp_entry = make_readable_migration_entry(
page_to_pfn(page + i));
+ if (young)
+ swp_entry = make_migration_entry_young(swp_entry);
+ if (dirty)
+ swp_entry = make_migration_entry_dirty(swp_entry);
entry = swp_entry_to_pte(swp_entry);
if (soft_dirty)
entry = pte_swp_mksoft_dirty(entry);
@@ -2185,6 +2206,9 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
entry = pte_wrprotect(entry);
if (!young)
entry = pte_mkold(entry);
+ /* NOTE: this may set soft-dirty too on some archs */
+ if (dirty)
+ entry = pte_mkdirty(entry);
if (soft_dirty)
entry = pte_mksoft_dirty(entry);
if (uffd_wp)
@@ -2288,25 +2312,11 @@ out:
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
bool freeze, struct folio *folio)
{
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
+ pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
- pgd = pgd_offset(vma->vm_mm, address);
- if (!pgd_present(*pgd))
+ if (!pmd)
return;
- p4d = p4d_offset(pgd, address);
- if (!p4d_present(*p4d))
- return;
-
- pud = pud_offset(p4d, address);
- if (!pud_present(*pud))
- return;
-
- pmd = pmd_offset(pud, address);
-
__split_huge_pmd(vma, pmd, address, freeze, folio);
}
@@ -2334,24 +2344,23 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
split_huge_pmd_if_needed(vma, end);
/*
- * If we're also updating the vma->vm_next->vm_start,
+ * If we're also updating the next vma vm_start,
* check if we need to split it.
*/
if (adjust_next > 0) {
- struct vm_area_struct *next = vma->vm_next;
+ struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
unsigned long nstart = next->vm_start;
nstart += adjust_next;
split_huge_pmd_if_needed(next, nstart);
}
}
-static void unmap_page(struct page *page)
+static void unmap_folio(struct folio *folio)
{
- struct folio *folio = page_folio(page);
enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
TTU_SYNC;
- VM_BUG_ON_PAGE(!PageHead(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
/*
* Anon pages need migration entries to preserve them, but file
@@ -2368,7 +2377,7 @@ static void remap_page(struct folio *folio, unsigned long nr)
{
int i = 0;
- /* If unmap_page() uses try_to_migrate() on file, remove this check */
+ /* If unmap_folio() uses try_to_migrate() on file, remove this check */
if (!folio_test_anon(folio))
return;
for (;;) {
@@ -2418,7 +2427,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
* for example lock_page() which set PG_waiters.
*
* Note that for mapped sub-pages of an anonymous THP,
- * PG_anon_exclusive has been cleared in unmap_page() and is stored in
+ * PG_anon_exclusive has been cleared in unmap_folio() and is stored in
* the migration entry instead from where remap_page() will restore it.
* We can still have PG_anon_exclusive set on effectively unmapped and
* unreferenced sub-pages of an anonymous THP: we can simply drop
@@ -2438,7 +2447,8 @@ static void __split_huge_page_tail(struct page *head, int tail,
#ifdef CONFIG_64BIT
(1L << PG_arch_2) |
#endif
- (1L << PG_dirty)));
+ (1L << PG_dirty) |
+ LRU_GEN_MASK | LRU_REFS_MASK));
/* ->mapping in first tail page is compound_mapcount */
VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
@@ -2611,27 +2621,26 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
struct folio *folio = page_folio(page);
- struct page *head = &folio->page;
- struct deferred_split *ds_queue = get_deferred_split_queue(head);
- XA_STATE(xas, &head->mapping->i_pages, head->index);
+ struct deferred_split *ds_queue = get_deferred_split_queue(&folio->page);
+ XA_STATE(xas, &folio->mapping->i_pages, folio->index);
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
int extra_pins, ret;
pgoff_t end;
bool is_hzp;
- VM_BUG_ON_PAGE(!PageLocked(head), head);
- VM_BUG_ON_PAGE(!PageCompound(head), head);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
- is_hzp = is_huge_zero_page(head);
- VM_WARN_ON_ONCE_PAGE(is_hzp, head);
+ is_hzp = is_huge_zero_page(&folio->page);
+ VM_WARN_ON_ONCE_FOLIO(is_hzp, folio);
if (is_hzp)
return -EBUSY;
- if (PageWriteback(head))
+ if (folio_test_writeback(folio))
return -EBUSY;
- if (PageAnon(head)) {
+ if (folio_test_anon(folio)) {
/*
* The caller does not necessarily hold an mmap_lock that would
	 * prevent the anon_vma from disappearing, so we first take a
@@ -2640,7 +2649,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
* is taken to serialise against parallel split or collapse
* operations.
*/
- anon_vma = page_get_anon_vma(head);
+ anon_vma = folio_get_anon_vma(folio);
if (!anon_vma) {
ret = -EBUSY;
goto out;
@@ -2649,7 +2658,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
mapping = NULL;
anon_vma_lock_write(anon_vma);
} else {
- mapping = head->mapping;
+ gfp_t gfp;
+
+ mapping = folio->mapping;
/* Truncated ? */
if (!mapping) {
@@ -2657,8 +2668,16 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
goto out;
}
- xas_split_alloc(&xas, head, compound_order(head),
- mapping_gfp_mask(mapping) & GFP_RECLAIM_MASK);
+ gfp = current_gfp_context(mapping_gfp_mask(mapping) &
+ GFP_RECLAIM_MASK);
+
+ if (folio_test_private(folio) &&
+ !filemap_release_folio(folio, gfp)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ xas_split_alloc(&xas, folio, folio_order(folio), gfp);
if (xas_error(&xas)) {
ret = xas_error(&xas);
goto out;
@@ -2672,7 +2691,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
* but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
* which cannot be nested inside the page tree lock. So note
* end now: i_size itself may be changed at any moment, but
- * head page lock is good enough to serialize the trimming.
+ * folio lock is good enough to serialize the trimming.
*/
end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
if (shmem_mapping(mapping))
@@ -2680,7 +2699,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
}
/*
- * Racy check if we can split the page, before unmap_page() will
+ * Racy check if we can split the page, before unmap_folio() will
* split PMDs
*/
if (!can_split_folio(folio, &extra_pins)) {
@@ -2688,38 +2707,38 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
goto out_unlock;
}
- unmap_page(head);
+ unmap_folio(folio);
/* block interrupt reentry in xa_lock and spinlock */
local_irq_disable();
if (mapping) {
/*
- * Check if the head page is present in page cache.
- * We assume all tail are present too, if head is there.
+		 * Check if the folio is present in the page cache.
+		 * We assume all tail pages are present too, if the folio is there.
*/
xas_lock(&xas);
xas_reset(&xas);
- if (xas_load(&xas) != head)
+ if (xas_load(&xas) != folio)
goto fail;
}
/* Prevent deferred_split_scan() touching ->_refcount */
spin_lock(&ds_queue->split_queue_lock);
- if (page_ref_freeze(head, 1 + extra_pins)) {
- if (!list_empty(page_deferred_list(head))) {
+ if (folio_ref_freeze(folio, 1 + extra_pins)) {
+ if (!list_empty(page_deferred_list(&folio->page))) {
ds_queue->split_queue_len--;
- list_del(page_deferred_list(head));
+ list_del(page_deferred_list(&folio->page));
}
spin_unlock(&ds_queue->split_queue_lock);
if (mapping) {
- int nr = thp_nr_pages(head);
+ int nr = folio_nr_pages(folio);
- xas_split(&xas, head, thp_order(head));
- if (PageSwapBacked(head)) {
- __mod_lruvec_page_state(head, NR_SHMEM_THPS,
+ xas_split(&xas, folio, folio_order(folio));
+ if (folio_test_swapbacked(folio)) {
+ __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS,
-nr);
} else {
- __mod_lruvec_page_state(head, NR_FILE_THPS,
+ __lruvec_stat_mod_folio(folio, NR_FILE_THPS,
-nr);
filemap_nr_thps_dec(mapping);
}
@@ -2983,7 +3002,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
/* FOLL_DUMP to ignore special (like zero) pages */
page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
- if (IS_ERR_OR_NULL(page) || is_zone_device_page(page))
+ if (IS_ERR_OR_NULL(page))
continue;
if (!is_transparent_hugepage(page))
@@ -3175,6 +3194,7 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
+ /* See page_try_share_anon_rmap(): invalidate PMD first. */
anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
if (anon_exclusive && page_try_share_anon_rmap(page)) {
set_pmd_at(mm, address, pvmw->pmd, pmdval);
@@ -3189,6 +3209,10 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
else
entry = make_readable_migration_entry(page_to_pfn(page));
+ if (pmd_young(pmdval))
+ entry = make_migration_entry_young(entry);
+ if (pmd_dirty(pmdval))
+ entry = make_migration_entry_dirty(entry);
pmdswp = swp_entry_to_pmd(entry);
if (pmd_soft_dirty(pmdval))
pmdswp = pmd_swp_mksoft_dirty(pmdswp);
@@ -3214,13 +3238,18 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
entry = pmd_to_swp_entry(*pvmw->pmd);
get_page(new);
- pmde = pmd_mkold(mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)));
+ pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
if (pmd_swp_soft_dirty(*pvmw->pmd))
pmde = pmd_mksoft_dirty(pmde);
if (is_writable_migration_entry(entry))
pmde = maybe_pmd_mkwrite(pmde, vma);
if (pmd_swp_uffd_wp(*pvmw->pmd))
pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
+ if (!is_migration_entry_young(entry))
+ pmde = pmd_mkold(pmde);
+	/* NOTE: this may also set soft-dirty on some archs */
+ if (PageDirty(new) && is_migration_entry_dirty(entry))
+ pmde = pmd_mkdirty(pmde);
if (PageAnon(new)) {
rmap_t rmap_flags = RMAP_COMPOUND;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0bdfc7e1c933..0ad53ad98e74 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -33,6 +33,7 @@
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
+#include <linux/memory.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
@@ -90,6 +91,9 @@ struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
+static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
+static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
@@ -257,7 +261,7 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
- struct file_region *nrg = NULL;
+ struct file_region *nrg;
VM_BUG_ON(resv->region_cache_count <= 0);
@@ -339,7 +343,7 @@ static bool has_same_uncharge_info(struct file_region *rg,
static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
- struct file_region *nrg = NULL, *prg = NULL;
+ struct file_region *nrg, *prg;
prg = list_prev_entry(rg, link);
if (&prg->link != &resv->regions && prg->to == rg->from &&
@@ -456,14 +460,12 @@ static int allocate_file_region_entries(struct resv_map *resv,
int regions_needed)
__must_hold(&resv->lock)
{
- struct list_head allocated_regions;
+ LIST_HEAD(allocated_regions);
int to_allocate = 0, i = 0;
struct file_region *trg = NULL, *rg = NULL;
VM_BUG_ON(regions_needed < 0);
- INIT_LIST_HEAD(&allocated_regions);
-
/*
* Check for sufficient descriptors in the cache to accommodate
* the number of in progress add operations plus regions_needed.
@@ -860,7 +862,7 @@ __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
* faults in a MAP_PRIVATE mapping. Only the process that called mmap()
* is guaranteed to have their future faults succeed.
*
- * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
+ * With the exception of hugetlb_dup_vma_private() which is called at fork(),
* the reserve counters are updated with the hugetlb_lock held. It is safe
* to reset the VMA at fork() time as it is not in use yet and there is no
* chance of the global counters getting corrupted as a result of the values.
@@ -1007,12 +1009,20 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
return (get_vma_private_data(vma) & flag) != 0;
}
-/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
-void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
+void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
+ /*
+ * Clear vm_private_data
+ * - For MAP_PRIVATE mappings, this is the reserve map which does
+ * not apply to children. Faults generated by the children are
+ * not guaranteed to succeed, even if read-only.
+ * - For shared mappings this is a per-vma semaphore that may be
+ * allocated in a subsequent call to hugetlb_vm_op_open.
+ */
+ vma->vm_private_data = (void *)0;
if (!(vma->vm_flags & VM_MAYSHARE))
- vma->vm_private_data = (void *)0;
+ return;
}
/*
@@ -1043,7 +1053,7 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
kref_put(&reservations->refs, resv_map_release);
}
- reset_vma_resv_huge_pages(vma);
+ hugetlb_dup_vma_private(vma);
}
/* Returns true if the VMA has associated reserve pages */
@@ -1182,6 +1192,11 @@ retry_cpuset:
return NULL;
}
+static unsigned long available_huge_pages(struct hstate *h)
+{
+ return h->free_huge_pages - h->resv_huge_pages;
+}
+
static struct page *dequeue_huge_page_vma(struct hstate *h,
struct vm_area_struct *vma,
unsigned long address, int avoid_reserve,
@@ -1198,12 +1213,11 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
* have no page reserves. This check ensures that reservations are
* not "stolen". The child may still get SIGKILLed
*/
- if (!vma_has_reserves(vma, chg) &&
- h->free_huge_pages - h->resv_huge_pages == 0)
+ if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
goto err;
/* If reserves cannot be used, ensure enough pages are in the pool */
- if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
+ if (avoid_reserve && !available_huge_pages(h))
goto err;
gfp_mask = htlb_alloc_mask(h);
@@ -1308,12 +1322,13 @@ static void __destroy_compound_gigantic_page(struct page *page,
{
int i;
int nr_pages = 1 << order;
- struct page *p = page + 1;
+ struct page *p;
atomic_set(compound_mapcount_ptr(page), 0);
atomic_set(compound_pincount_ptr(page), 0);
- for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+ for (i = 1; i < nr_pages; i++) {
+ p = nth_page(page, i);
p->mapping = NULL;
clear_compound_head(p);
if (!demote)
@@ -1506,6 +1521,10 @@ static void add_hugetlb_page(struct hstate *h, struct page *page,
set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
set_page_private(page, 0);
+ /*
+	 * We have to set HPageVmemmapOptimized again, as the
+	 * set_page_private(page, 0) call above cleared it.
+ */
SetHPageVmemmapOptimized(page);
/*
@@ -1530,7 +1549,7 @@ static void add_hugetlb_page(struct hstate *h, struct page *page,
static void __update_and_free_page(struct hstate *h, struct page *page)
{
int i;
- struct page *subpage = page;
+ struct page *subpage;
if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
return;
@@ -1561,8 +1580,8 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
if (unlikely(PageHWPoison(page)))
hugetlb_clear_page_hwpoison(page);
- for (i = 0; i < pages_per_huge_page(h);
- i++, subpage = mem_map_next(subpage, page, i)) {
+ for (i = 0; i < pages_per_huge_page(h); i++) {
+ subpage = nth_page(page, i);
subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1 << PG_referenced | 1 << PG_dirty |
1 << PG_active | 1 << PG_private |
@@ -1769,13 +1788,14 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
{
int i, j;
int nr_pages = 1 << order;
- struct page *p = page + 1;
+ struct page *p;
/* we rely on prep_new_huge_page to set the destructor */
set_compound_order(page, order);
- __ClearPageReserved(page);
__SetPageHead(page);
- for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+ for (i = 0; i < nr_pages; i++) {
+ p = nth_page(page, i);
+
/*
* For gigantic hugepages allocated through bootmem at
* boot, it's safer to be consistent with the not-gigantic
@@ -1814,22 +1834,26 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
} else {
VM_BUG_ON_PAGE(page_count(p), p);
}
- set_compound_head(p, page);
+ if (i != 0)
+ set_compound_head(p, page);
}
atomic_set(compound_mapcount_ptr(page), -1);
atomic_set(compound_pincount_ptr(page), 0);
return true;
out_error:
- /* undo tail page modifications made above */
- p = page + 1;
- for (j = 1; j < i; j++, p = mem_map_next(p, page, j)) {
- clear_compound_head(p);
+ /* undo page modifications made above */
+ for (j = 0; j < i; j++) {
+ p = nth_page(page, j);
+ if (j != 0)
+ clear_compound_head(p);
set_page_refcounted(p);
}
/* need to clear PG_reserved on remaining tail pages */
- for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
+ for (; j < nr_pages; j++) {
+ p = nth_page(page, j);
__ClearPageReserved(p);
+ }
set_compound_order(page, 0);
#ifdef CONFIG_64BIT
page[1].compound_nr = 0;
@@ -1918,6 +1942,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
int order = huge_page_order(h);
struct page *page;
bool alloc_try_hard = true;
+ bool retry = true;
/*
* By default we always try hard to allocate the page with
@@ -1933,7 +1958,21 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
gfp_mask |= __GFP_RETRY_MAYFAIL;
if (nid == NUMA_NO_NODE)
nid = numa_mem_id();
+retry:
page = __alloc_pages(gfp_mask, order, nid, nmask);
+
+ /* Freeze head page */
+ if (page && !page_ref_freeze(page, 1)) {
+ __free_pages(page, order);
+ if (retry) { /* retry once */
+ retry = false;
+ goto retry;
+ }
+ /* WOW! twice in a row. */
+ pr_warn("HugeTLB head page unexpected inflated ref count\n");
+ page = NULL;
+ }
+
if (page)
__count_vm_event(HTLB_BUDDY_PGALLOC);
else
@@ -1961,6 +2000,9 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
/*
* Common helper to allocate a fresh hugetlb page. All specific allocators
* should use this function to get new hugetlb pages
+ *
+ * Note that the returned page is 'frozen': the ref count of the head page
+ * and all tail pages is zero.
*/
static struct page *alloc_fresh_huge_page(struct hstate *h,
gfp_t gfp_mask, int nid, nodemask_t *nmask,
@@ -2018,7 +2060,7 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
if (!page)
return 0;
- put_page(page); /* free it into the hugepage allocator */
+ free_huge_page(page); /* free it into the hugepage allocator */
return 1;
}
@@ -2087,7 +2129,7 @@ retry:
if (!page_count(page)) {
struct page *head = compound_head(page);
struct hstate *h = page_hstate(head);
- if (h->free_huge_pages - h->resv_huge_pages == 0)
+ if (!available_huge_pages(h))
goto out;
/*
@@ -2175,10 +2217,9 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
* Allocates a fresh surplus page from the page allocator.
*/
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
- int nid, nodemask_t *nmask, bool zero_ref)
+ int nid, nodemask_t *nmask)
{
struct page *page = NULL;
- bool retry = false;
if (hstate_is_gigantic(h))
return NULL;
@@ -2188,7 +2229,6 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
goto out_unlock;
spin_unlock_irq(&hugetlb_lock);
-retry:
page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
if (!page)
return NULL;
@@ -2204,34 +2244,10 @@ retry:
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
SetHPageTemporary(page);
spin_unlock_irq(&hugetlb_lock);
- put_page(page);
+ free_huge_page(page);
return NULL;
}
- if (zero_ref) {
- /*
- * Caller requires a page with zero ref count.
- * We will drop ref count here. If someone else is holding
- * a ref, the page will be freed when they drop it. Abuse
- * temporary page flag to accomplish this.
- */
- SetHPageTemporary(page);
- if (!put_page_testzero(page)) {
- /*
- * Unexpected inflated ref count on freshly allocated
- * huge. Retry once.
- */
- pr_info("HugeTLB unexpected inflated ref count on freshly allocated page\n");
- spin_unlock_irq(&hugetlb_lock);
- if (retry)
- return NULL;
-
- retry = true;
- goto retry;
- }
- ClearHPageTemporary(page);
- }
-
h->surplus_huge_pages++;
h->surplus_huge_pages_node[page_to_nid(page)]++;
@@ -2253,6 +2269,9 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
if (!page)
return NULL;
+ /* fresh huge pages are frozen */
+ set_page_refcounted(page);
+
/*
* We do not account these pages as surplus because they are only
* temporary and will be released properly on the last reference
@@ -2280,14 +2299,14 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
gfp_t gfp = gfp_mask | __GFP_NOWARN;
gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
- page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false);
+ page = alloc_surplus_huge_page(h, gfp, nid, nodemask);
/* Fallback to all nodes if page==NULL */
nodemask = NULL;
}
if (!page)
- page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false);
+ page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
mpol_cond_put(mpol);
return page;
}
@@ -2297,7 +2316,7 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask)
{
spin_lock_irq(&hugetlb_lock);
- if (h->free_huge_pages - h->resv_huge_pages > 0) {
+ if (available_huge_pages(h)) {
struct page *page;
page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
@@ -2336,7 +2355,7 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
static int gather_surplus_pages(struct hstate *h, long delta)
__must_hold(&hugetlb_lock)
{
- struct list_head surplus_list;
+ LIST_HEAD(surplus_list);
struct page *page, *tmp;
int ret;
long i;
@@ -2351,14 +2370,13 @@ static int gather_surplus_pages(struct hstate *h, long delta)
}
allocated = 0;
- INIT_LIST_HEAD(&surplus_list);
ret = -ENOMEM;
retry:
spin_unlock_irq(&hugetlb_lock);
for (i = 0; i < needed; i++) {
page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
- NUMA_NO_NODE, NULL, true);
+ NUMA_NO_NODE, NULL);
if (!page) {
alloc_ok = false;
break;
@@ -2720,7 +2738,6 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
{
gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
int nid = page_to_nid(old_page);
- bool alloc_retry = false;
struct page *new_page;
int ret = 0;
@@ -2731,30 +2748,9 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
* the pool. This simplifies and let us do most of the processing
* under the lock.
*/
-alloc_retry:
new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
if (!new_page)
return -ENOMEM;
- /*
- * If all goes well, this page will be directly added to the free
- * list in the pool. For this the ref count needs to be zero.
- * Attempt to drop now, and retry once if needed. It is VERY
- * unlikely there is another ref on the page.
- *
- * If someone else has a reference to the page, it will be freed
- * when they drop their ref. Abuse temporary page flag to accomplish
- * this. Retry once if there is an inflated ref count.
- */
- SetHPageTemporary(new_page);
- if (!put_page_testzero(new_page)) {
- if (alloc_retry)
- return -EBUSY;
-
- alloc_retry = true;
- goto alloc_retry;
- }
- ClearHPageTemporary(new_page);
-
__prep_new_huge_page(h, new_page);
retry:
@@ -2934,6 +2930,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
}
spin_lock_irq(&hugetlb_lock);
list_add(&page->lru, &h->hugepage_activelist);
+ set_page_refcounted(page);
/* Fall through */
}
hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
@@ -3038,7 +3035,7 @@ static void __init gather_bootmem_prealloc(void)
if (prep_compound_gigantic_page(page, huge_page_order(h))) {
WARN_ON(PageReserved(page));
prep_new_huge_page(h, page, page_to_nid(page));
- put_page(page); /* add to the hugepage allocator */
+ free_huge_page(page); /* add to the hugepage allocator */
} else {
/* VERY unlikely inflated ref count on a tail page */
free_gigantic_page(page, huge_page_order(h));
@@ -3070,7 +3067,7 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
&node_states[N_MEMORY], NULL);
if (!page)
break;
- put_page(page); /* free it into the hugepage allocator */
+ free_huge_page(page); /* free it into the hugepage allocator */
}
cond_resched();
}
@@ -3461,9 +3458,8 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
else
prep_compound_page(subpage, target_hstate->order);
set_page_private(subpage, 0);
- set_page_refcounted(subpage);
prep_new_huge_page(target_hstate, subpage, nid);
- put_page(subpage);
+ free_huge_page(subpage);
}
mutex_unlock(&target_hstate->resize_lock);
@@ -3474,7 +3470,8 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
* based on pool changes for the demoted page.
*/
h->max_huge_pages--;
- target_hstate->max_huge_pages += pages_per_huge_page(h);
+ target_hstate->max_huge_pages +=
+ pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
return rc;
}
@@ -3716,7 +3713,7 @@ static ssize_t demote_store(struct kobject *kobj,
unsigned long nr_available;
nodemask_t nodes_allowed, *n_mask;
struct hstate *h;
- int err = 0;
+ int err;
int nid;
err = kstrtoul(buf, 10, &nr_demote);
@@ -3767,8 +3764,7 @@ HSTATE_ATTR_WO(demote);
static ssize_t demote_size_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- int nid;
- struct hstate *h = kobj_to_hstate(kobj, &nid);
+ struct hstate *h = kobj_to_hstate(kobj, NULL);
unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
return sysfs_emit(buf, "%lukB\n", demote_size);
@@ -3781,7 +3777,6 @@ static ssize_t demote_size_store(struct kobject *kobj,
struct hstate *h, *demote_hstate;
unsigned long demote_size;
unsigned int demote_order;
- int nid;
demote_size = (unsigned long)memparse(buf, NULL);
@@ -3793,7 +3788,7 @@ static ssize_t demote_size_store(struct kobject *kobj,
return -EINVAL;
/* demote order must be smaller than hstate order */
- h = kobj_to_hstate(kobj, &nid);
+ h = kobj_to_hstate(kobj, NULL);
if (demote_order >= h->order)
return -EINVAL;
@@ -3847,35 +3842,26 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
if (retval) {
kobject_put(hstate_kobjs[hi]);
hstate_kobjs[hi] = NULL;
+ return retval;
}
if (h->demote_order) {
- if (sysfs_create_group(hstate_kobjs[hi],
- &hstate_demote_attr_group))
+ retval = sysfs_create_group(hstate_kobjs[hi],
+ &hstate_demote_attr_group);
+ if (retval) {
pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
+ sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
+ kobject_put(hstate_kobjs[hi]);
+ hstate_kobjs[hi] = NULL;
+ return retval;
+ }
}
- return retval;
-}
-
-static void __init hugetlb_sysfs_init(void)
-{
- struct hstate *h;
- int err;
-
- hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
- if (!hugepages_kobj)
- return;
-
- for_each_hstate(h) {
- err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
- hstate_kobjs, &hstate_attr_group);
- if (err)
- pr_err("HugeTLB: Unable to add hstate %s", h->name);
- }
+ return 0;
}
#ifdef CONFIG_NUMA
+static bool hugetlb_sysfs_initialized __ro_after_init;
/*
* node_hstate/s - associate per node hstate attributes, via their kobjects,
@@ -3931,7 +3917,7 @@ static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
* Unregister hstate attributes from a single node device.
* No-op if no hstate attributes attached.
*/
-static void hugetlb_unregister_node(struct node *node)
+void hugetlb_unregister_node(struct node *node)
{
struct hstate *h;
struct node_hstate *nhs = &node_hstates[node->dev.id];
@@ -3941,10 +3927,15 @@ static void hugetlb_unregister_node(struct node *node)
for_each_hstate(h) {
int idx = hstate_index(h);
- if (nhs->hstate_kobjs[idx]) {
- kobject_put(nhs->hstate_kobjs[idx]);
- nhs->hstate_kobjs[idx] = NULL;
- }
+ struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];
+
+ if (!hstate_kobj)
+ continue;
+ if (h->demote_order)
+ sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
+ sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
+ kobject_put(hstate_kobj);
+ nhs->hstate_kobjs[idx] = NULL;
}
kobject_put(nhs->hugepages_kobj);
@@ -3956,12 +3947,15 @@ static void hugetlb_unregister_node(struct node *node)
* Register hstate attributes for a single node device.
* No-op if attributes already registered.
*/
-static void hugetlb_register_node(struct node *node)
+void hugetlb_register_node(struct node *node)
{
struct hstate *h;
struct node_hstate *nhs = &node_hstates[node->dev.id];
int err;
+ if (!hugetlb_sysfs_initialized)
+ return;
+
if (nhs->hugepages_kobj)
return; /* already allocated */
@@ -3992,18 +3986,8 @@ static void __init hugetlb_register_all_nodes(void)
{
int nid;
- for_each_node_state(nid, N_MEMORY) {
- struct node *node = node_devices[nid];
- if (node->dev.id == nid)
- hugetlb_register_node(node);
- }
-
- /*
- * Let the node device driver know we're here so it can
- * [un]register hstate attributes on node hotplug.
- */
- register_hugetlbfs_with_node(hugetlb_register_node,
- hugetlb_unregister_node);
+ for_each_online_node(nid)
+ hugetlb_register_node(node_devices[nid]);
}
#else /* !CONFIG_NUMA */
@@ -4019,6 +4003,36 @@ static void hugetlb_register_all_nodes(void) { }
#endif
+#ifdef CONFIG_CMA
+static void __init hugetlb_cma_check(void);
+#else
+static inline __init void hugetlb_cma_check(void)
+{
+}
+#endif
+
+static void __init hugetlb_sysfs_init(void)
+{
+ struct hstate *h;
+ int err;
+
+ hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
+ if (!hugepages_kobj)
+ return;
+
+ for_each_hstate(h) {
+ err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
+ hstate_kobjs, &hstate_attr_group);
+ if (err)
+ pr_err("HugeTLB: Unable to add hstate %s", h->name);
+ }
+
+#ifdef CONFIG_NUMA
+ hugetlb_sysfs_initialized = true;
+#endif
+ hugetlb_register_all_nodes();
+}
+
static int __init hugetlb_init(void)
{
int i;
@@ -4073,7 +4087,6 @@ static int __init hugetlb_init(void)
report_hugepages();
hugetlb_sysfs_init();
- hugetlb_register_all_nodes();
hugetlb_cgroup_file_init();
#ifdef CONFIG_SMP
@@ -4118,7 +4131,7 @@ void __init hugetlb_add_hstate(unsigned int order)
h->next_nid_to_alloc = first_memory_node;
h->next_nid_to_free = first_memory_node;
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
- huge_page_size(h)/1024);
+ huge_page_size(h)/SZ_1K);
parsed_hstate = h;
}
@@ -4133,11 +4146,11 @@ static void __init hugepages_clear_pages_in_node(void)
if (!hugetlb_max_hstate) {
default_hstate_max_huge_pages = 0;
memset(default_hugepages_in_node, 0,
- MAX_NUMNODES * sizeof(unsigned int));
+ sizeof(default_hugepages_in_node));
} else {
parsed_hstate->max_huge_pages = 0;
memset(parsed_hstate->max_huge_pages_node, 0,
- MAX_NUMNODES * sizeof(unsigned int));
+ sizeof(parsed_hstate->max_huge_pages_node));
}
}
@@ -4332,18 +4345,34 @@ static int __init default_hugepagesz_setup(char *s)
}
__setup("default_hugepagesz=", default_hugepagesz_setup);
+static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
+{
+#ifdef CONFIG_NUMA
+ struct mempolicy *mpol = get_task_policy(current);
+
+ /*
+ * Only enforce MPOL_BIND policy which overlaps with cpuset policy
+ * (from policy_nodemask) specifically for hugetlb case
+ */
+ if (mpol->mode == MPOL_BIND &&
+ (apply_policy_zone(mpol, gfp_zone(gfp)) &&
+ cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
+ return &mpol->nodes;
+#endif
+ return NULL;
+}
+
static unsigned int allowed_mems_nr(struct hstate *h)
{
int node;
unsigned int nr = 0;
- nodemask_t *mpol_allowed;
+ nodemask_t *mbind_nodemask;
unsigned int *array = h->free_huge_pages_node;
gfp_t gfp_mask = htlb_alloc_mask(h);
- mpol_allowed = policy_nodemask_current(gfp_mask);
-
+ mbind_nodemask = policy_mbind_nodemask(gfp_mask);
for_each_node_mask(node, cpuset_current_mems_allowed) {
- if (!mpol_allowed || node_isset(node, *mpol_allowed))
+ if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
nr += array[node];
}
@@ -4583,16 +4612,28 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
kref_get(&resv->refs);
}
+
+ /*
+ * vma_lock structure for sharable mappings is vma specific.
+ * Clear old pointer (if copied via vm_area_dup) and create new.
+ */
+ if (vma->vm_flags & VM_MAYSHARE) {
+ vma->vm_private_data = NULL;
+ hugetlb_vma_lock_alloc(vma);
+ }
}
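Taken together with hugetlb_dup_vma_private() earlier in this patch, the per-vma lock handling at fork() looks roughly like the following sketch (a simplification of dup_mmap(); the variable names are illustrative and not part of the patch):

	/* the child vma starts as a copy of the parent, vm_private_data included */
	tmp = vm_area_dup(vma);

	/* hugetlb: drop the parent's private data (reserve map or vma_lock pointer) */
	if (is_vm_hugetlb_page(tmp))
		hugetlb_dup_vma_private(tmp);

	/*
	 * ->open() is hugetlb_vm_op_open(), which allocates a fresh vma_lock
	 * for VM_MAYSHARE mappings, so the child never shares the parent's lock.
	 */
	if (tmp->vm_ops && tmp->vm_ops->open)
		tmp->vm_ops->open(tmp);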
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
struct hstate *h = hstate_vma(vma);
- struct resv_map *resv = vma_resv_map(vma);
+ struct resv_map *resv;
struct hugepage_subpool *spool = subpool_vma(vma);
unsigned long reserve, start, end;
long gbl_reserve;
+ hugetlb_vma_lock_free(vma);
+
+ resv = vma_resv_map(vma);
if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
return;
@@ -4723,14 +4764,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *dst_vma,
struct vm_area_struct *src_vma)
{
- pte_t *src_pte, *dst_pte, entry, dst_entry;
+ pte_t *src_pte, *dst_pte, entry;
struct page *ptepage;
unsigned long addr;
bool cow = is_cow_mapping(src_vma->vm_flags);
struct hstate *h = hstate_vma(src_vma);
unsigned long sz = huge_page_size(h);
unsigned long npages = pages_per_huge_page(h);
- struct address_space *mapping = src_vma->vm_file->f_mapping;
struct mmu_notifier_range range;
unsigned long last_addr_mask;
int ret = 0;
@@ -4744,12 +4784,12 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
raw_write_seqcount_begin(&src->write_protect_seq);
} else {
/*
- * For shared mappings i_mmap_rwsem must be held to call
- * huge_pte_alloc, otherwise the returned ptep could go
- * away if part of a shared pmd and another thread calls
- * huge_pmd_unshare.
+ * For shared mappings the vma lock must be held before
+ * calling huge_pte_offset in the src vma. Otherwise, the
+ * returned ptep could go away if part of a shared pmd and
+ * another thread calls huge_pmd_unshare.
*/
- i_mmap_lock_read(mapping);
+ hugetlb_vma_lock_read(src_vma);
}
last_addr_mask = hugetlb_mask_last_page(h);
@@ -4768,15 +4808,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
/*
* If the pagetables are shared don't copy or take references.
- * dst_pte == src_pte is the common case of src/dest sharing.
*
+ * dst_pte == src_pte is the common case of src/dest sharing.
* However, src could have 'unshared' and dst shares with
- * another vma. If dst_pte !none, this implies sharing.
- * Check here before taking page table lock, and once again
- * after taking the lock below.
+	 * another vma. So the page_count of the ptep page is checked instead
+	 * to reliably determine whether the pte is shared.
*/
- dst_entry = huge_ptep_get(dst_pte);
- if ((dst_pte == src_pte) || !huge_pte_none(dst_entry)) {
+ if (page_count(virt_to_page(dst_pte)) > 1) {
addr |= last_addr_mask;
continue;
}
@@ -4785,13 +4823,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
src_ptl = huge_pte_lockptr(h, src, src_pte);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
entry = huge_ptep_get(src_pte);
- dst_entry = huge_ptep_get(dst_pte);
again:
- if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
+ if (huge_pte_none(entry)) {
/*
- * Skip if src entry none. Also, skip in the
- * unlikely case dst entry !none as this implies
- * sharing with another vma.
+ * Skip if src entry none.
*/
;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
@@ -4870,7 +4905,7 @@ again:
restore_reserve_on_error(h, dst_vma, addr,
new);
put_page(new);
- /* dst_entry won't change as in child */
+ /* huge_ptep of dst_pte won't change as in child */
goto again;
}
hugetlb_install_page(dst_vma, dst_pte, addr, new);
@@ -4902,7 +4937,7 @@ again:
raw_write_seqcount_end(&src->write_protect_seq);
mmu_notifier_invalidate_range_end(&range);
} else {
- i_mmap_unlock_read(mapping);
+ hugetlb_vma_unlock_read(src_vma);
}
return ret;
@@ -4961,6 +4996,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
mmu_notifier_invalidate_range_start(&range);
last_addr_mask = hugetlb_mask_last_page(h);
/* Prevent race with file truncation */
+ hugetlb_vma_lock_write(vma);
i_mmap_lock_write(mapping);
for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
src_pte = huge_pte_offset(mm, old_addr, sz);
@@ -4992,6 +5028,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
flush_tlb_range(vma, old_end - len, old_end);
mmu_notifier_invalidate_range_end(&range);
i_mmap_unlock_write(mapping);
+ hugetlb_vma_unlock_write(vma);
return len + old_addr - old_end;
}
@@ -5139,19 +5176,22 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
unsigned long end, struct page *ref_page,
zap_flags_t zap_flags)
{
+ hugetlb_vma_lock_write(vma);
+ i_mmap_lock_write(vma->vm_file->f_mapping);
+
__unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
/*
- * Clear this flag so that x86's huge_pmd_share page_table_shareable
- * test will fail on a vma being torn down, and not grab a page table
- * on its way out. We're lucky that the flag has such an appropriate
- * name, and can in fact be safely cleared here. We could clear it
- * before the __unmap_hugepage_range above, but all that's necessary
- * is to clear it before releasing the i_mmap_rwsem. This works
- * because in the context this is called, the VMA is about to be
- * destroyed and the i_mmap_rwsem is held.
+	 * Unlock and free the vma lock before releasing i_mmap_rwsem.
+	 * Freeing the vma_lock makes the vma ineligible for pmd sharing,
+	 * and i_mmap_rwsem is required to set up pmd sharing. This is
+	 * important as page tables for this unmapped range will be
+	 * asynchronously deleted. If the page tables were shared, someone
+	 * else could still access them after they are freed.
*/
- vma->vm_flags &= ~VM_MAYSHARE;
+ __hugetlb_vma_unlock_write_free(vma);
+
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
@@ -5316,11 +5356,10 @@ retry_avoidcopy:
u32 hash;
put_page(old_page);
- BUG_ON(huge_pte_none(pte));
/*
- * Drop hugetlb_fault_mutex and i_mmap_rwsem before
- * unmapping. unmapping needs to hold i_mmap_rwsem
- * in write mode. Dropping i_mmap_rwsem in read mode
+ * Drop hugetlb_fault_mutex and vma_lock before
+ * unmapping. unmapping needs to hold vma_lock
+ * in write mode. Dropping vma_lock in read mode
* here is OK as COW mappings do not interact with
* PMD sharing.
*
@@ -5328,13 +5367,13 @@ retry_avoidcopy:
*/
idx = vma_hugecache_offset(h, vma, haddr);
hash = hugetlb_fault_mutex_hash(mapping, idx);
+ hugetlb_vma_unlock_read(vma);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
unmap_ref_private(mm, vma, old_page, haddr);
- i_mmap_lock_read(mapping);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
+ hugetlb_vma_lock_read(vma);
spin_lock(ptl);
ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
if (likely(ptep &&
@@ -5408,19 +5447,6 @@ out_release_old:
return ret;
}
-/* Return the pagecache page at a given address within a VMA */
-static struct page *hugetlbfs_pagecache_page(struct hstate *h,
- struct vm_area_struct *vma, unsigned long address)
-{
- struct address_space *mapping;
- pgoff_t idx;
-
- mapping = vma->vm_file->f_mapping;
- idx = vma_hugecache_offset(h, vma, address);
-
- return find_lock_page(mapping, idx);
-}
-
/*
* Return whether there is a pagecache page to back given address within VMA.
* Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
@@ -5441,7 +5467,7 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
return page != NULL;
}
-int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
+int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
pgoff_t idx)
{
struct folio *folio = page_folio(page);
@@ -5478,7 +5504,6 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
unsigned long addr,
unsigned long reason)
{
- vm_fault_t ret;
u32 hash;
struct vm_fault vmf = {
.vma = vma,
@@ -5496,18 +5521,14 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
};
/*
- * hugetlb_fault_mutex and i_mmap_rwsem must be
- * dropped before handling userfault. Reacquire
- * after handling fault to make calling code simpler.
+ * vma_lock and hugetlb_fault_mutex must be dropped before handling
+	 * userfault. Also, mmap_lock could be dropped due to handling
+	 * userfault, so any vma operation after this point must be careful.
*/
+ hugetlb_vma_unlock_read(vma);
hash = hugetlb_fault_mutex_hash(mapping, idx);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
- ret = handle_userfault(&vmf, reason);
- i_mmap_lock_read(mapping);
- mutex_lock(&hugetlb_fault_mutex_table[hash]);
-
- return ret;
+ return handle_userfault(&vmf, reason);
}
static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
@@ -5525,6 +5546,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
spinlock_t *ptl;
unsigned long haddr = address & huge_page_mask(h);
bool new_page, new_pagecache_page = false;
+ u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
/*
* Currently, we are forced to kill the process in the event the
@@ -5535,29 +5557,24 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
current->pid);
- return ret;
+ goto out;
}
/*
- * We can not race with truncation due to holding i_mmap_rwsem.
- * i_size is modified when holding i_mmap_rwsem, so check here
- * once for faults beyond end of file.
+ * Use page lock to guard against racing truncation
+ * before we get page_table_lock.
*/
- size = i_size_read(mapping->host) >> huge_page_shift(h);
- if (idx >= size)
- goto out;
-
-retry:
new_page = false;
page = find_lock_page(mapping, idx);
if (!page) {
+ size = i_size_read(mapping->host) >> huge_page_shift(h);
+ if (idx >= size)
+ goto out;
/* Check for page in userfault range */
- if (userfaultfd_missing(vma)) {
- ret = hugetlb_handle_userfault(vma, mapping, idx,
+ if (userfaultfd_missing(vma))
+ return hugetlb_handle_userfault(vma, mapping, idx,
flags, haddr, address,
VM_UFFD_MISSING);
- goto out;
- }
page = alloc_huge_page(vma, haddr, 0);
if (IS_ERR(page)) {
@@ -5585,11 +5602,17 @@ retry:
new_page = true;
if (vma->vm_flags & VM_MAYSHARE) {
- int err = huge_add_to_page_cache(page, mapping, idx);
+ int err = hugetlb_add_to_page_cache(page, mapping, idx);
if (err) {
+ /*
+ * err can't be -EEXIST which implies someone
+ * else consumed the reservation since hugetlb
+				 * fault mutex is held when adding a hugetlb page
+ * to the page cache. So it's safe to call
+ * restore_reserve_on_error() here.
+ */
+ restore_reserve_on_error(h, vma, haddr, page);
put_page(page);
- if (err == -EEXIST)
- goto retry;
goto out;
}
new_pagecache_page = true;
@@ -5617,10 +5640,9 @@ retry:
if (userfaultfd_minor(vma)) {
unlock_page(page);
put_page(page);
- ret = hugetlb_handle_userfault(vma, mapping, idx,
+ return hugetlb_handle_userfault(vma, mapping, idx,
flags, haddr, address,
VM_UFFD_MINOR);
- goto out;
}
}
@@ -5678,15 +5700,17 @@ retry:
unlock_page(page);
out:
+ hugetlb_vma_unlock_read(vma);
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
return ret;
backout:
spin_unlock(ptl);
backout_unlocked:
- unlock_page(page);
- /* restore reserve for newly allocated pages not in page cache */
if (new_page && !new_pagecache_page)
restore_reserve_on_error(h, vma, haddr, page);
+
+ unlock_page(page);
put_page(page);
goto out;
}
@@ -5747,40 +5771,41 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
}
/*
- * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
- * until finished with ptep. This serves two purposes:
- * 1) It prevents huge_pmd_unshare from being called elsewhere
- * and making the ptep no longer valid.
- * 2) It synchronizes us with i_size modifications during truncation.
+ * Serialize hugepage allocation and instantiation, so that we don't
+ * get spurious allocation failures if two CPUs race to instantiate
+ * the same page in the page cache.
+ */
+ mapping = vma->vm_file->f_mapping;
+ idx = vma_hugecache_offset(h, vma, haddr);
+ hash = hugetlb_fault_mutex_hash(mapping, idx);
+ mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
+ /*
+ * Acquire vma lock before calling huge_pte_alloc and hold
+ * until finished with ptep. This prevents huge_pmd_unshare from
+ * being called elsewhere and making the ptep no longer valid.
*
* ptep could have already be assigned via huge_pte_offset. That
* is OK, as huge_pte_alloc will return the same value unless
* something has changed.
*/
- mapping = vma->vm_file->f_mapping;
- i_mmap_lock_read(mapping);
+ hugetlb_vma_lock_read(vma);
ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
if (!ptep) {
- i_mmap_unlock_read(mapping);
+ hugetlb_vma_unlock_read(vma);
+ mutex_unlock(&hugetlb_fault_mutex_table[hash]);
return VM_FAULT_OOM;
}
- /*
- * Serialize hugepage allocation and instantiation, so that we don't
- * get spurious allocation failures if two CPUs race to instantiate
- * the same page in the page cache.
- */
- idx = vma_hugecache_offset(h, vma, haddr);
- hash = hugetlb_fault_mutex_hash(mapping, idx);
- mutex_lock(&hugetlb_fault_mutex_table[hash]);
-
entry = huge_ptep_get(ptep);
/* PTE markers should be handled the same way as none pte */
- if (huge_pte_none_mostly(entry)) {
- ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
+ if (huge_pte_none_mostly(entry))
+ /*
+	 * hugetlb_no_page will drop the vma lock and hugetlb fault
+	 * mutex internally, which makes us return immediately.
+ */
+ return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
entry, flags);
- goto out_mutex;
- }
ret = 0;
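To summarize the new ordering in hugetlb_fault(), a hedged sketch of the locking sequence for a shared mapping (fault handling details elided; not part of the patch):

	/* 1. serialize faults on this page index: hugetlb fault mutex */
	hash = hugetlb_fault_mutex_hash(mapping, idx);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	/* 2. keep ptep valid by blocking huge_pmd_unshare(): vma lock */
	hugetlb_vma_lock_read(vma);
	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));

	/* ... handle the fault ... */

	/* 3. release in reverse order */
	hugetlb_vma_unlock_read(vma);
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);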
@@ -5810,7 +5835,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
/* Just decrements count, does not deallocate */
vma_end_reservation(h, vma, haddr);
- pagecache_page = hugetlbfs_pagecache_page(h, vma, haddr);
+ pagecache_page = find_lock_page(mapping, idx);
}
ptl = huge_pte_lock(h, mm, ptep);
@@ -5834,8 +5859,8 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unlock_page(pagecache_page);
put_page(pagecache_page);
}
+ hugetlb_vma_unlock_read(vma);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
return handle_userfault(&vmf, VM_UFFD_WP);
}
@@ -5878,8 +5903,8 @@ out_ptl:
put_page(pagecache_page);
}
out_mutex:
+ hugetlb_vma_unlock_read(vma);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
/*
* Generally it's safe to hold refcount during waiting page lock. But
* here we just wait to defer the next page fault to avoid busy loop and
@@ -6007,39 +6032,24 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
/*
* Serialization between remove_inode_hugepages() and
- * huge_add_to_page_cache() below happens through the
+ * hugetlb_add_to_page_cache() below happens through the
	 * hugetlb_fault_mutex_table that here must be held by
* the caller.
*/
- ret = huge_add_to_page_cache(page, mapping, idx);
+ ret = hugetlb_add_to_page_cache(page, mapping, idx);
if (ret)
goto out_release_nounlock;
page_in_pagecache = true;
}
- ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
- spin_lock(ptl);
+ ptl = huge_pte_lock(h, dst_mm, dst_pte);
/*
- * Recheck the i_size after holding PT lock to make sure not
- * to leave any page mapped (as page_mapped()) beyond the end
- * of the i_size (remove_inode_hugepages() is strict about
- * enforcing that). If we bail out here, we'll also leave a
- * page in the radix tree in the vm_shared case beyond the end
- * of the i_size, but remove_inode_hugepages() will take care
- * of it as soon as we drop the hugetlb_fault_mutex_table.
- */
- size = i_size_read(mapping->host) >> huge_page_shift(h);
- ret = -EFAULT;
- if (idx >= size)
- goto out_release_unlock;
-
- ret = -EEXIST;
- /*
* We allow to overwrite a pte marker: consider when both MISSING|WP
* registered, we firstly wr-protect a none pte which has no page cache
* page backing it, then access the page.
*/
+ ret = -EEXIST;
if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
goto out_release_unlock;
@@ -6107,7 +6117,7 @@ static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
for (nr = 0; nr < refs; nr++) {
if (likely(pages))
- pages[nr] = mem_map_offset(page, nr);
+ pages[nr] = nth_page(page, nr);
if (vmas)
vmas[nr] = vma;
}
@@ -6271,7 +6281,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
(vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
if (pages || vmas)
- record_subpages_vmas(mem_map_offset(page, pfn_offset),
+ record_subpages_vmas(nth_page(page, pfn_offset),
vma, refs,
likely(pages) ? pages + i : NULL,
vmas ? vmas + i : NULL);
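The switch from mem_map_offset() to nth_page() in these two hunks matters for gigantic pages on configurations where the struct page array is not virtually contiguous (SPARSEMEM without VMEMMAP): plain `page + n` can walk off the end of a mem_map section, while nth_page() recomputes the target through the pfn. A minimal sketch of the idea; my_nth_page(), head_page and pfn_offset are illustrative names, not the kernel's:

	/*
	 * Sketch only: go through the pfn instead of assuming the struct
	 * pages of one gigantic page are contiguous in the virtual mem_map.
	 */
	#define my_nth_page(page, n)	pfn_to_page(page_to_pfn(page) + (n))

	/* e.g. recording the subpages handed back for one GUP batch */
	for (nr = 0; nr < refs; nr++)
		pages[nr] = my_nth_page(head_page, pfn_offset + nr);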
@@ -6342,8 +6352,9 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
flush_cache_range(vma, range.start, range.end);
mmu_notifier_invalidate_range_start(&range);
- last_addr_mask = hugetlb_mask_last_page(h);
+ hugetlb_vma_lock_write(vma);
i_mmap_lock_write(vma->vm_file->f_mapping);
+ last_addr_mask = hugetlb_mask_last_page(h);
for (; address < end; address += psize) {
spinlock_t *ptl;
ptep = huge_pte_offset(mm, address, psize);
@@ -6442,6 +6453,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* See Documentation/mm/mmu_notifier.rst
*/
i_mmap_unlock_write(vma->vm_file->f_mapping);
+ hugetlb_vma_unlock_write(vma);
mmu_notifier_invalidate_range_end(&range);
return pages << h->order;
@@ -6467,6 +6479,11 @@ bool hugetlb_reserve_pages(struct inode *inode,
}
/*
+ * vma specific semaphore used for pmd sharing synchronization
+ */
+ hugetlb_vma_lock_alloc(vma);
+
+ /*
* Only apply hugepage reservation if asked. At fault time, an
* attempt will be made for VM_NORESERVE to allocate a page
* without using reserves
@@ -6489,12 +6506,11 @@ bool hugetlb_reserve_pages(struct inode *inode,
resv_map = inode_resv_map(inode);
chg = region_chg(resv_map, from, to, &regions_needed);
-
} else {
/* Private mapping. */
resv_map = resv_map_alloc();
if (!resv_map)
- return false;
+ goto out_err;
chg = to - from;
@@ -6589,6 +6605,7 @@ out_uncharge_cgroup:
hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
chg * pages_per_huge_page(h), h_cg);
out_err:
+ hugetlb_vma_lock_free(vma);
if (!vma || vma->vm_flags & VM_MAYSHARE)
/* Only call region_abort if the region_chg succeeded but the
* region_add failed or didn't run.
@@ -6658,35 +6675,37 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
/*
* match the virtual addresses, permission and the alignment of the
* page table page.
+ *
+ * Also, vma_lock (vm_private_data) is required for sharing.
*/
if (pmd_index(addr) != pmd_index(saddr) ||
vm_flags != svm_flags ||
- !range_in_vma(svma, sbase, s_end))
+ !range_in_vma(svma, sbase, s_end) ||
+ !svma->vm_private_data)
return 0;
return saddr;
}
-static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
-{
- unsigned long base = addr & PUD_MASK;
- unsigned long end = base + PUD_SIZE;
-
- /*
- * check on proper vm_flags and page table alignment
- */
- if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
- return true;
- return false;
-}
-
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
+ unsigned long start = addr & PUD_MASK;
+ unsigned long end = start + PUD_SIZE;
+
#ifdef CONFIG_USERFAULTFD
if (uffd_disable_huge_pmd_share(vma))
return false;
#endif
- return vma_shareable(vma, addr);
+ /*
+ * check on proper vm_flags and page table alignment
+ */
+ if (!(vma->vm_flags & VM_MAYSHARE))
+ return false;
+ if (!vma->vm_private_data) /* vma lock required for sharing */
+ return false;
+ if (!range_in_vma(vma, start, end))
+ return false;
+ return true;
}
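A worked example of the checks in want_pmd_share() above, as a stand-alone program rather than kernel code (the 1 GiB PUD_SIZE value assumes x86-64):

	#include <stdio.h>

	#define PUD_SIZE	(1UL << 30)
	#define PUD_MASK	(~(PUD_SIZE - 1))

	int main(void)
	{
		unsigned long vm_start = 0x40000000UL, vm_end = 0x80000000UL;
		unsigned long addr = 0x6fe00000UL;
		unsigned long start = addr & PUD_MASK;	/* 0x40000000 */
		unsigned long end = start + PUD_SIZE;	/* 0x80000000 */

		/* range_in_vma(): the whole PUD-sized range must lie inside the vma */
		printf("pmd sharing possible: %d\n", vm_start <= start && end <= vm_end);
		return 0;
	}

With VM_MAYSHARE set and a vma lock allocated, this fault address can share a pmd page because the surrounding PUD range fits entirely within the vma.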
/*
@@ -6716,16 +6735,157 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
*end = ALIGN(*end, PUD_SIZE);
}
+static bool __vma_shareable_flags_pmd(struct vm_area_struct *vma)
+{
+ return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
+ vma->vm_private_data;
+}
+
+void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+{
+ if (__vma_shareable_flags_pmd(vma)) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ down_read(&vma_lock->rw_sema);
+ }
+}
+
+void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+{
+ if (__vma_shareable_flags_pmd(vma)) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ up_read(&vma_lock->rw_sema);
+ }
+}
+
+void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+{
+ if (__vma_shareable_flags_pmd(vma)) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ down_write(&vma_lock->rw_sema);
+ }
+}
+
+void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+{
+ if (__vma_shareable_flags_pmd(vma)) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ up_write(&vma_lock->rw_sema);
+ }
+}
+
+int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+{
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ if (!__vma_shareable_flags_pmd(vma))
+ return 1;
+
+ return down_write_trylock(&vma_lock->rw_sema);
+}
+
+void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+{
+ if (__vma_shareable_flags_pmd(vma)) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ lockdep_assert_held(&vma_lock->rw_sema);
+ }
+}
+
+void hugetlb_vma_lock_release(struct kref *kref)
+{
+ struct hugetlb_vma_lock *vma_lock = container_of(kref,
+ struct hugetlb_vma_lock, refs);
+
+ kfree(vma_lock);
+}
+
+void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
+{
+ struct vm_area_struct *vma = vma_lock->vma;
+
+ /*
+	 * The vma_lock structure may or may not be released as a result of
+	 * the put; it will certainly no longer be attached to the vma, so
+	 * clear the pointer.
+ * Semaphore synchronizes access to vma_lock->vma field.
+ */
+ vma_lock->vma = NULL;
+ vma->vm_private_data = NULL;
+ up_write(&vma_lock->rw_sema);
+ kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
+}
+
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+{
+ if (__vma_shareable_flags_pmd(vma)) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ __hugetlb_vma_unlock_write_put(vma_lock);
+ }
+}
+
+static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
+{
+ /*
+ * Only present in sharable vmas.
+ */
+ if (!vma || !__vma_shareable_flags_pmd(vma))
+ return;
+
+ if (vma->vm_private_data) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+ down_write(&vma_lock->rw_sema);
+ __hugetlb_vma_unlock_write_put(vma_lock);
+ }
+}
+
+static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
+{
+ struct hugetlb_vma_lock *vma_lock;
+
+ /* Only establish in (flags) sharable vmas */
+ if (!vma || !(vma->vm_flags & VM_MAYSHARE))
+ return;
+
+ /* Should never get here with non-NULL vm_private_data */
+ if (vma->vm_private_data)
+ return;
+
+ vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
+ if (!vma_lock) {
+ /*
+		 * If we cannot allocate the structure, then the vma cannot
+		 * participate in pmd sharing. Missing out on that is only a
+		 * possible performance enhancement and a memory saving.
+ * However, the lock is also used to synchronize page
+ * faults with truncation. If the lock is not present,
+ * unlikely races could leave pages in a file past i_size
+ * until the file is removed. Warn in the unlikely case of
+ * allocation failure.
+ */
+ pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
+ return;
+ }
+
+ kref_init(&vma_lock->refs);
+ init_rwsem(&vma_lock->rw_sema);
+ vma_lock->vma = vma;
+ vma->vm_private_data = vma_lock;
+}
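Taken together, the helpers above form a small reader/writer API keyed off vma->vm_private_data. Roughly, the rest of this patch uses it as sketched below; the authoritative call sites are the hugetlb_fault(), hugetlb_change_protection() and hugetlb_unshare_all_pmds() hunks elsewhere in this diff:

	/* Lookup/fault side: shared, so an established shared pmd stays valid */
	hugetlb_vma_lock_read(vma);
	ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
	/* walk or instantiate the page table while the lock is held */
	hugetlb_vma_unlock_read(vma);

	/* Unshare side: exclusive, together with i_mmap_rwsem in write mode */
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	huge_pmd_unshare(mm, vma, address, ptep);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	hugetlb_vma_unlock_write(vma);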
+
/*
* Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
* and returns the corresponding pte. While this is not necessary for the
* !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner.
- *
- * This routine must be called with i_mmap_rwsem held in at least read mode if
- * sharing is possible. For hugetlbfs, this prevents removal of any page
- * table entries associated with the address space. This is important as we
- * are setting up sharing based on existing page table entries (mappings).
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_rwsem section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
*/
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pud_t *pud)
@@ -6739,7 +6899,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
pte_t *pte;
spinlock_t *ptl;
- i_mmap_assert_locked(mapping);
+ i_mmap_lock_read(mapping);
vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
if (svma == vma)
continue;
@@ -6769,6 +6929,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
spin_unlock(ptl);
out:
pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ i_mmap_unlock_read(mapping);
return pte;
}
@@ -6779,7 +6940,7 @@ out:
* indicated by page_count > 1, unmap is achieved by clearing pud and
* decrementing the ref count. If count == 1, the pte page is not shared.
*
- * Called with page table lock held and i_mmap_rwsem held in write mode.
+ * Called with page table lock held.
*
* returns: 1 successfully unmapped a shared pte page
* 0 the underlying pte page is not shared, or it is the last user
@@ -6792,6 +6953,7 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
pud_t *pud = pud_offset(p4d, addr);
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+ hugetlb_vma_assert_locked(vma);
BUG_ON(page_count(virt_to_page(ptep)) == 0);
if (page_count(virt_to_page(ptep)) == 1)
return 0;
@@ -6803,6 +6965,48 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
}
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+
+void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+{
+}
+
+void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+{
+}
+
+void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+{
+}
+
+void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+{
+}
+
+int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+{
+ return 1;
+}
+
+void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+{
+}
+
+void hugetlb_vma_lock_release(struct kref *kref)
+{
+}
+
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+{
+}
+
+static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
+{
+}
+
+static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
+{
+}
+
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pud_t *pud)
{
@@ -7173,6 +7377,7 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
start, end);
mmu_notifier_invalidate_range_start(&range);
+ hugetlb_vma_lock_write(vma);
i_mmap_lock_write(vma->vm_file->f_mapping);
for (address = start; address < end; address += PUD_SIZE) {
ptep = huge_pte_offset(mm, address, sz);
@@ -7184,6 +7389,7 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
}
flush_hugetlb_tlb_range(vma, start, end);
i_mmap_unlock_write(vma->vm_file->f_mapping);
+ hugetlb_vma_unlock_write(vma);
/*
* No need to call mmu_notifier_invalidate_range(), see
* Documentation/mm/mmu_notifier.rst.
@@ -7334,7 +7540,7 @@ void __init hugetlb_cma_reserve(int order)
hugetlb_cma_size = 0;
}
-void __init hugetlb_cma_check(void)
+static void __init hugetlb_cma_check(void)
{
if (!hugetlb_cma_size || cma_reserve_called)
return;
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index c86691c431fd..f61d132df52b 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -75,11 +75,11 @@ parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
- int idx;
+ struct hstate *h;
- for (idx = 0; idx < hugetlb_max_hstate; idx++) {
+ for_each_hstate(h) {
if (page_counter_read(
- hugetlb_cgroup_counter_from_cgroup(h_cg, idx)))
+ hugetlb_cgroup_counter_from_cgroup(h_cg, hstate_index(h))))
return true;
}
return false;
@@ -154,9 +154,9 @@ hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
* function.
*/
for_each_node(node) {
- /* Set node_to_alloc to -1 for offline nodes. */
+ /* Set node_to_alloc to NUMA_NO_NODE for offline nodes. */
int node_to_alloc =
- node_state(node, N_NORMAL_MEMORY) ? node : -1;
+ node_state(node, N_NORMAL_MEMORY) ? node : NUMA_NO_NODE;
h_cgroup->nodeinfo[node] =
kzalloc_node(sizeof(struct hugetlb_cgroup_per_node),
GFP_KERNEL, node_to_alloc);
@@ -225,17 +225,14 @@ static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
struct hstate *h;
struct page *page;
- int idx;
do {
- idx = 0;
for_each_hstate(h) {
spin_lock_irq(&hugetlb_lock);
list_for_each_entry(page, &h->hugepage_activelist, lru)
- hugetlb_cgroup_move_parent(idx, h_cg, page);
+ hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page);
spin_unlock_irq(&hugetlb_lock);
- idx++;
}
cond_resched();
} while (hugetlb_cgroup_have_usage(h_cg));
@@ -442,7 +439,7 @@ void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
return;
- if (rg->reservation_counter && resv->pages_per_hpage && nr_pages > 0 &&
+ if (rg->reservation_counter && resv->pages_per_hpage &&
!resv->reservation_counter) {
page_counter_uncharge(rg->reservation_counter,
nr_pages * resv->pages_per_hpage);
@@ -675,12 +672,12 @@ static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of,
static char *mem_fmt(char *buf, int size, unsigned long hsize)
{
- if (hsize >= (1UL << 30))
- snprintf(buf, size, "%luGB", hsize >> 30);
- else if (hsize >= (1UL << 20))
- snprintf(buf, size, "%luMB", hsize >> 20);
+ if (hsize >= SZ_1G)
+ snprintf(buf, size, "%luGB", hsize / SZ_1G);
+ else if (hsize >= SZ_1M)
+ snprintf(buf, size, "%luMB", hsize / SZ_1M);
else
- snprintf(buf, size, "%luKB", hsize >> 10);
+ snprintf(buf, size, "%luKB", hsize / SZ_1K);
return buf;
}
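The conversion to SZ_* constants above is purely cosmetic; the strings produced are unchanged. A stand-alone replica for illustration (not kernel code; the constant values below match include/linux/sizes.h):

	#include <stdio.h>

	#define SZ_1K 0x00000400UL
	#define SZ_1M 0x00100000UL
	#define SZ_1G 0x40000000UL

	static char *mem_fmt(char *buf, int size, unsigned long hsize)
	{
		if (hsize >= SZ_1G)
			snprintf(buf, size, "%luGB", hsize / SZ_1G);
		else if (hsize >= SZ_1M)
			snprintf(buf, size, "%luMB", hsize / SZ_1M);
		else
			snprintf(buf, size, "%luKB", hsize / SZ_1K);
		return buf;
	}

	int main(void)
	{
		char buf[32];

		printf("%s\n", mem_fmt(buf, sizeof(buf), 2UL << 20));   /* "2MB"  */
		printf("%s\n", mem_fmt(buf, sizeof(buf), 1UL << 30));   /* "1GB"  */
		printf("%s\n", mem_fmt(buf, sizeof(buf), 64UL << 10));  /* "64KB" */
		return 0;
	}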
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 20f414c0379f..ba2a2596fb4e 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -265,11 +265,10 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
static inline void reset_struct_pages(struct page *start)
{
- int i;
struct page *from = start + NR_RESET_STRUCT_PAGE;
- for (i = 0; i < NR_RESET_STRUCT_PAGE; i++)
- memcpy(start + i, from, sizeof(*from));
+ BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
+ memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
}
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
@@ -287,6 +286,11 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
copy_page(to, (void *)walk->reuse_addr);
reset_struct_pages(to);
+ /*
+ * Makes sure that preceding stores to the page contents become visible
+ * before the set_pte_at() write.
+ */
+ smp_wmb();
set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}
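The smp_wmb() added above follows the usual publish pattern for freshly initialized memory that becomes reachable through a page table entry: initialize everything the pte will expose, order those stores, and only then install the pte. Schematically (a sketch of the pattern, not extra patch content):

	copy_page(to, (void *)walk->reuse_addr);	/* fill the new vmemmap page          */
	reset_struct_pages(to);				/* re-initialize its struct pages      */
	smp_wmb();					/* order contents before the pte write */
	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));	/* publish */

	/*
	 * A CPU that observes the new pte and dereferences through it is
	 * then guaranteed to also see the initialized page contents.
	 */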
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 65e242b5a432..d0548e382b6b 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -63,13 +63,13 @@ static int hwpoison_unpoison(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(hwpoison_fops, NULL, hwpoison_inject, "%lli\n");
DEFINE_DEBUGFS_ATTRIBUTE(unpoison_fops, NULL, hwpoison_unpoison, "%lli\n");
-static void pfn_inject_exit(void)
+static void __exit pfn_inject_exit(void)
{
hwpoison_filter_enable = 0;
debugfs_remove_recursive(hwpoison_dir);
}
-static int pfn_inject_init(void)
+static int __init pfn_inject_init(void)
{
hwpoison_dir = debugfs_create_dir("hwpoison", NULL);
diff --git a/mm/init-mm.c b/mm/init-mm.c
index fbe7844d0912..c9327abb771c 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm_types.h>
-#include <linux/rbtree.h>
+#include <linux/maple_tree.h>
#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/list.h>
@@ -28,7 +28,7 @@
* and size this cpu_bitmask to NR_CPUS.
*/
struct mm_struct init_mm = {
- .mm_rb = RB_ROOT,
+ .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock),
.pgd = swapper_pg_dir,
.mm_users = ATOMIC_INIT(2),
.mm_count = ATOMIC_INIT(1),
diff --git a/mm/internal.h b/mm/internal.h
index 785409805ed7..6b7ef495b56d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -83,9 +83,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf);
void folio_rotate_reclaimable(struct folio *folio);
bool __folio_end_writeback(struct folio *folio);
void deactivate_file_folio(struct folio *folio);
+void folio_activate(struct folio *folio);
-void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
- unsigned long floor, unsigned long ceiling);
+void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
+ struct vm_area_struct *start_vma, unsigned long floor,
+ unsigned long ceiling);
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
struct zap_details;
@@ -187,7 +189,7 @@ extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason
/*
* in mm/rmap.c:
*/
-extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
+pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
/*
* in mm/page_alloc.c
@@ -365,7 +367,6 @@ extern int user_min_free_kbytes;
extern void free_unref_page(struct page *page, unsigned int order);
extern void free_unref_page_list(struct list_head *list);
-extern void zone_pcp_update(struct zone *zone, int cpu_online);
extern void zone_pcp_reset(struct zone *zone);
extern void zone_pcp_disable(struct zone *zone);
extern void zone_pcp_enable(struct zone *zone);
@@ -479,9 +480,6 @@ static inline bool is_data_mapping(vm_flags_t flags)
}
/* mm/util.c */
-void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev);
-void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma);
struct anon_vma *folio_anon_vma(struct folio *folio);
#ifdef CONFIG_MMU
@@ -639,34 +637,6 @@ static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
}
#endif /* !CONFIG_MMU */
-/*
- * Return the mem_map entry representing the 'offset' subpage within
- * the maximally aligned gigantic page 'base'. Handle any discontiguity
- * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
- */
-static inline struct page *mem_map_offset(struct page *base, int offset)
-{
- if (unlikely(offset >= MAX_ORDER_NR_PAGES))
- return nth_page(base, offset);
- return base + offset;
-}
-
-/*
- * Iterator over all subpages within the maximally aligned gigantic
- * page 'base'. Handle any discontiguity in the mem_map.
- */
-static inline struct page *mem_map_next(struct page *iter,
- struct page *base, int offset)
-{
- if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
- unsigned long pfn = page_to_pfn(base) + offset;
- if (!pfn_valid(pfn))
- return NULL;
- return pfn_to_page(pfn);
- }
- return iter + 1;
-}
-
/* Memory initialisation debug and verification */
enum mminit_level {
MMINIT_WARNING,
@@ -847,8 +817,14 @@ int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
}
#endif
+int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift);
+
void vunmap_range_noflush(unsigned long start, unsigned long end);
+void __vunmap_range_noflush(unsigned long start, unsigned long end);
+
int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
unsigned long addr, int page_nid, int *flags);
@@ -860,8 +836,6 @@ int migrate_device_coherent_page(struct page *page);
*/
struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags);
-DECLARE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
-
extern bool mirrored_kernelcore;
static inline bool vma_soft_dirty_enabled(struct vm_area_struct *vma)
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 1f84df9c302e..d4837bff3b60 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -35,7 +35,15 @@ CFLAGS_shadow.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_hw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
CFLAGS_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
+CFLAGS_KASAN_TEST := $(CFLAGS_KASAN) -fno-builtin $(call cc-disable-warning, vla)
+
+CFLAGS_kasan_test.o := $(CFLAGS_KASAN_TEST)
+CFLAGS_kasan_test_module.o := $(CFLAGS_KASAN_TEST)
+
obj-y := common.o report.o
obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quarantine.o
obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o tags.o report_tags.o
obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o tags.o report_tags.o
+
+obj-$(CONFIG_KASAN_KUNIT_TEST) += kasan_test.o
+obj-$(CONFIG_KASAN_MODULE_TEST) += kasan_test_module.o
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 69f583855c8b..833bf2cfd2a3 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -30,13 +30,20 @@
#include "kasan.h"
#include "../slab.h"
+struct slab *kasan_addr_to_slab(const void *addr)
+{
+ if (virt_addr_valid(addr))
+ return virt_to_slab(addr);
+ return NULL;
+}
+
depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
{
unsigned long entries[KASAN_STACK_DEPTH];
unsigned int nr_entries;
nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
- return __stack_depot_save(entries, nr_entries, flags, can_alloc);
+ return __stack_depot_save(entries, nr_entries, 0, flags, can_alloc);
}
void kasan_set_track(struct kasan_track *track, gfp_t flags)
@@ -88,17 +95,6 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
}
#endif /* CONFIG_KASAN_STACK */
-/*
- * Only allow cache merging when stack collection is disabled and no metadata
- * is present.
- */
-slab_flags_t __kasan_never_merge(void)
-{
- if (kasan_stack_collection_enabled())
- return SLAB_KASAN;
- return 0;
-}
-
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
u8 tag;
@@ -121,132 +117,11 @@ void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
KASAN_PAGE_FREE, init);
}
-/*
- * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
- * For larger allocations larger redzones are used.
- */
-static inline unsigned int optimal_redzone(unsigned int object_size)
-{
- return
- object_size <= 64 - 16 ? 16 :
- object_size <= 128 - 32 ? 32 :
- object_size <= 512 - 64 ? 64 :
- object_size <= 4096 - 128 ? 128 :
- object_size <= (1 << 14) - 256 ? 256 :
- object_size <= (1 << 15) - 512 ? 512 :
- object_size <= (1 << 16) - 1024 ? 1024 : 2048;
-}
-
-void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
- slab_flags_t *flags)
-{
- unsigned int ok_size;
- unsigned int optimal_size;
-
- /*
- * SLAB_KASAN is used to mark caches as ones that are sanitized by
- * KASAN. Currently this flag is used in two places:
- * 1. In slab_ksize() when calculating the size of the accessible
- * memory within the object.
- * 2. In slab_common.c to prevent merging of sanitized caches.
- */
- *flags |= SLAB_KASAN;
-
- if (!kasan_stack_collection_enabled())
- return;
-
- ok_size = *size;
-
- /* Add alloc meta into redzone. */
- cache->kasan_info.alloc_meta_offset = *size;
- *size += sizeof(struct kasan_alloc_meta);
-
- /*
- * If alloc meta doesn't fit, don't add it.
- * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
- * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
- * larger sizes.
- */
- if (*size > KMALLOC_MAX_SIZE) {
- cache->kasan_info.alloc_meta_offset = 0;
- *size = ok_size;
- /* Continue, since free meta might still fit. */
- }
-
- /* Only the generic mode uses free meta or flexible redzones. */
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
- cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
- return;
- }
-
- /*
- * Add free meta into redzone when it's not possible to store
- * it in the object. This is the case when:
- * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
- * be touched after it was freed, or
- * 2. Object has a constructor, which means it's expected to
- * retain its content until the next allocation, or
- * 3. Object is too small.
- * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
- */
- if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
- cache->object_size < sizeof(struct kasan_free_meta)) {
- ok_size = *size;
-
- cache->kasan_info.free_meta_offset = *size;
- *size += sizeof(struct kasan_free_meta);
-
- /* If free meta doesn't fit, don't add it. */
- if (*size > KMALLOC_MAX_SIZE) {
- cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
- *size = ok_size;
- }
- }
-
- /* Calculate size with optimal redzone. */
- optimal_size = cache->object_size + optimal_redzone(cache->object_size);
- /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
- if (optimal_size > KMALLOC_MAX_SIZE)
- optimal_size = KMALLOC_MAX_SIZE;
- /* Use optimal size if the size with added metas is not large enough. */
- if (*size < optimal_size)
- *size = optimal_size;
-}
-
void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
cache->kasan_info.is_kmalloc = true;
}
-size_t __kasan_metadata_size(struct kmem_cache *cache)
-{
- if (!kasan_stack_collection_enabled())
- return 0;
- return (cache->kasan_info.alloc_meta_offset ?
- sizeof(struct kasan_alloc_meta) : 0) +
- (cache->kasan_info.free_meta_offset ?
- sizeof(struct kasan_free_meta) : 0);
-}
-
-struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
- const void *object)
-{
- if (!cache->kasan_info.alloc_meta_offset)
- return NULL;
- return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
-}
-
-#ifdef CONFIG_KASAN_GENERIC
-struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
- const void *object)
-{
- BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
- if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
- return NULL;
- return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
-}
-#endif
-
void __kasan_poison_slab(struct slab *slab)
{
struct page *page = slab_page(slab);
@@ -312,13 +187,9 @@ static inline u8 assign_tag(struct kmem_cache *cache,
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
const void *object)
{
- struct kasan_alloc_meta *alloc_meta;
-
- if (kasan_stack_collection_enabled()) {
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (alloc_meta)
- __memset(alloc_meta, 0, sizeof(*alloc_meta));
- }
+ /* Initialize per-object metadata if it is present. */
+ if (kasan_requires_meta())
+ kasan_init_object_meta(cache, object);
/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
object = set_tag(object, assign_tag(cache, object, true));
@@ -329,13 +200,11 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
unsigned long ip, bool quarantine, bool init)
{
- u8 tag;
void *tagged_object;
if (!kasan_arch_is_ready())
return false;
- tag = get_tag(object);
tagged_object = object;
object = kasan_reset_tag(object);
@@ -364,7 +233,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
return false;
if (kasan_stack_collection_enabled())
- kasan_set_free_info(cache, object, tag);
+ kasan_save_free_info(cache, tagged_object);
return kasan_quarantine_put(cache, object);
}
@@ -423,20 +292,6 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
}
}
-static void set_alloc_info(struct kmem_cache *cache, void *object,
- gfp_t flags, bool is_kmalloc)
-{
- struct kasan_alloc_meta *alloc_meta;
-
- /* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
- if (cache->kasan_info.is_kmalloc && !is_kmalloc)
- return;
-
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (alloc_meta)
- kasan_set_track(&alloc_meta->alloc_track, flags);
-}
-
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
void *object, gfp_t flags, bool init)
{
@@ -466,8 +321,8 @@ void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
kasan_unpoison(tagged_object, cache->object_size, init);
/* Save alloc info (if possible) for non-kmalloc() allocations. */
- if (kasan_stack_collection_enabled())
- set_alloc_info(cache, (void *)object, flags, false);
+ if (kasan_stack_collection_enabled() && !cache->kasan_info.is_kmalloc)
+ kasan_save_alloc_info(cache, tagged_object, flags);
return tagged_object;
}
@@ -512,8 +367,8 @@ static inline void *____kasan_kmalloc(struct kmem_cache *cache,
* Save alloc info (if possible) for kmalloc() allocations.
* This also rewrites the alloc info when called from kasan_krealloc().
*/
- if (kasan_stack_collection_enabled())
- set_alloc_info(cache, (void *)object, flags, true);
+ if (kasan_stack_collection_enabled() && cache->kasan_info.is_kmalloc)
+ kasan_save_alloc_info(cache, (void *)object, flags);
/* Keep the tag that was set by kasan_slab_alloc(). */
return (void *)object;
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 437fcc7e77cf..d8b5590f9484 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -328,6 +328,139 @@ DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);
+/* Only allow cache merging when no per-object metadata is present. */
+slab_flags_t kasan_never_merge(void)
+{
+ if (!kasan_requires_meta())
+ return 0;
+ return SLAB_KASAN;
+}
+
+/*
+ * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
+ * For larger allocations larger redzones are used.
+ */
+static inline unsigned int optimal_redzone(unsigned int object_size)
+{
+ return
+ object_size <= 64 - 16 ? 16 :
+ object_size <= 128 - 32 ? 32 :
+ object_size <= 512 - 64 ? 64 :
+ object_size <= 4096 - 128 ? 128 :
+ object_size <= (1 << 14) - 256 ? 256 :
+ object_size <= (1 << 15) - 512 ? 512 :
+ object_size <= (1 << 16) - 1024 ? 1024 : 2048;
+}
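/*
 * Worked example of the redzone policy above (illustrative, not part of
 * the patch): for a cache with object_size == 100, the first matching
 * bucket is "object_size <= 512 - 64", so a 64-byte redzone is chosen;
 * an object_size of 3000 falls into "object_size <= 4096 - 128" and gets
 * a 128-byte redzone.  The thresholds are chosen so that object plus
 * redzone still fits within the next power-of-two size (64, 128, 512, ...).
 */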
+
+void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
+ slab_flags_t *flags)
+{
+ unsigned int ok_size;
+ unsigned int optimal_size;
+
+ if (!kasan_requires_meta())
+ return;
+
+ /*
+ * SLAB_KASAN is used to mark caches that are sanitized by KASAN
+ * and that thus have per-object metadata.
+ * Currently this flag is used in two places:
+ * 1. In slab_ksize() to account for per-object metadata when
+ * calculating the size of the accessible memory within the object.
+ * 2. In slab_common.c via kasan_never_merge() to prevent merging of
+ * caches with per-object metadata.
+ */
+ *flags |= SLAB_KASAN;
+
+ ok_size = *size;
+
+ /* Add alloc meta into redzone. */
+ cache->kasan_info.alloc_meta_offset = *size;
+ *size += sizeof(struct kasan_alloc_meta);
+
+ /*
+ * If alloc meta doesn't fit, don't add it.
+ * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
+ * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
+ * larger sizes.
+ */
+ if (*size > KMALLOC_MAX_SIZE) {
+ cache->kasan_info.alloc_meta_offset = 0;
+ *size = ok_size;
+ /* Continue, since free meta might still fit. */
+ }
+
+ /*
+ * Add free meta into redzone when it's not possible to store
+ * it in the object. This is the case when:
+ * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
+ * be touched after it was freed, or
+ * 2. Object has a constructor, which means it's expected to
+ * retain its content until the next allocation, or
+ * 3. Object is too small.
+ * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
+ */
+ if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
+ cache->object_size < sizeof(struct kasan_free_meta)) {
+ ok_size = *size;
+
+ cache->kasan_info.free_meta_offset = *size;
+ *size += sizeof(struct kasan_free_meta);
+
+ /* If free meta doesn't fit, don't add it. */
+ if (*size > KMALLOC_MAX_SIZE) {
+ cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
+ *size = ok_size;
+ }
+ }
+
+ /* Calculate size with optimal redzone. */
+ optimal_size = cache->object_size + optimal_redzone(cache->object_size);
+ /* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
+ if (optimal_size > KMALLOC_MAX_SIZE)
+ optimal_size = KMALLOC_MAX_SIZE;
+ /* Use optimal size if the size with added metas is not large enough. */
+ if (*size < optimal_size)
+ *size = optimal_size;
+}
+
+struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
+ const void *object)
+{
+ if (!cache->kasan_info.alloc_meta_offset)
+ return NULL;
+ return (void *)object + cache->kasan_info.alloc_meta_offset;
+}
+
+struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
+ const void *object)
+{
+ BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
+ if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
+ return NULL;
+ return (void *)object + cache->kasan_info.free_meta_offset;
+}
+
+void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
+{
+ struct kasan_alloc_meta *alloc_meta;
+
+ alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (alloc_meta)
+ __memset(alloc_meta, 0, sizeof(*alloc_meta));
+}
+
+size_t kasan_metadata_size(struct kmem_cache *cache)
+{
+ if (!kasan_requires_meta())
+ return 0;
+ return (cache->kasan_info.alloc_meta_offset ?
+ sizeof(struct kasan_alloc_meta) : 0) +
+ ((cache->kasan_info.free_meta_offset &&
+ cache->kasan_info.free_meta_offset != KASAN_NO_FREE_META) ?
+ sizeof(struct kasan_free_meta) : 0);
+}
+
static void __kasan_record_aux_stack(void *addr, bool can_alloc)
{
struct slab *slab = kasan_addr_to_slab(addr);
@@ -358,8 +491,16 @@ void kasan_record_aux_stack_noalloc(void *addr)
return __kasan_record_aux_stack(addr, false);
}
-void kasan_set_free_info(struct kmem_cache *cache,
- void *object, u8 tag)
+void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
+{
+ struct kasan_alloc_meta *alloc_meta;
+
+ alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (alloc_meta)
+ kasan_set_track(&alloc_meta->alloc_track, flags);
+}
+
+void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
struct kasan_free_meta *free_meta;
@@ -371,12 +512,3 @@ void kasan_set_free_info(struct kmem_cache *cache,
/* The object was freed and has free track set. */
*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK;
}
-
-struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
- void *object, u8 tag)
-{
- if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREETRACK)
- return NULL;
- /* Free meta must be present with KASAN_SLAB_FREETRACK. */
- return &kasan_get_free_meta(cache, object)->free_track;
-}
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 9ad8eff71b28..b22c4f461cb0 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -38,16 +38,9 @@ enum kasan_arg_vmalloc {
KASAN_ARG_VMALLOC_ON,
};
-enum kasan_arg_stacktrace {
- KASAN_ARG_STACKTRACE_DEFAULT,
- KASAN_ARG_STACKTRACE_OFF,
- KASAN_ARG_STACKTRACE_ON,
-};
-
static enum kasan_arg kasan_arg __ro_after_init;
static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
static enum kasan_arg_vmalloc kasan_arg_vmalloc __initdata;
-static enum kasan_arg_stacktrace kasan_arg_stacktrace __initdata;
/*
* Whether KASAN is enabled at all.
@@ -66,9 +59,6 @@ EXPORT_SYMBOL_GPL(kasan_mode);
/* Whether to enable vmalloc tagging. */
DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
-/* Whether to collect alloc/free stack traces. */
-DEFINE_STATIC_KEY_TRUE(kasan_flag_stacktrace);
-
/* kasan=off/on */
static int __init early_kasan_flag(char *arg)
{
@@ -122,23 +112,6 @@ static int __init early_kasan_flag_vmalloc(char *arg)
}
early_param("kasan.vmalloc", early_kasan_flag_vmalloc);
-/* kasan.stacktrace=off/on */
-static int __init early_kasan_flag_stacktrace(char *arg)
-{
- if (!arg)
- return -EINVAL;
-
- if (!strcmp(arg, "off"))
- kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_OFF;
- else if (!strcmp(arg, "on"))
- kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_ON;
- else
- return -EINVAL;
-
- return 0;
-}
-early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
-
static inline const char *kasan_mode_info(void)
{
if (kasan_mode == KASAN_MODE_ASYNC)
@@ -213,17 +186,7 @@ void __init kasan_init_hw_tags(void)
break;
}
- switch (kasan_arg_stacktrace) {
- case KASAN_ARG_STACKTRACE_DEFAULT:
- /* Default is specified by kasan_flag_stacktrace definition. */
- break;
- case KASAN_ARG_STACKTRACE_OFF:
- static_branch_disable(&kasan_flag_stacktrace);
- break;
- case KASAN_ARG_STACKTRACE_ON:
- static_branch_enable(&kasan_flag_stacktrace);
- break;
- }
+ kasan_init_tags();
/* KASAN is now initialized, enable it. */
static_branch_enable(&kasan_flag_enabled);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 01c03e45acd4..abbcc1b0eec5 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -2,18 +2,37 @@
#ifndef __MM_KASAN_KASAN_H
#define __MM_KASAN_KASAN_H
+#include <linux/atomic.h>
#include <linux/kasan.h>
#include <linux/kasan-tags.h>
#include <linux/kfence.h>
#include <linux/stackdepot.h>
-#ifdef CONFIG_KASAN_HW_TAGS
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
#include <linux/static_key.h>
+
+DECLARE_STATIC_KEY_TRUE(kasan_flag_stacktrace);
+
+static inline bool kasan_stack_collection_enabled(void)
+{
+ return static_branch_unlikely(&kasan_flag_stacktrace);
+}
+
+#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
+static inline bool kasan_stack_collection_enabled(void)
+{
+ return true;
+}
+
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+
#include "../slab.h"
DECLARE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
-DECLARE_STATIC_KEY_TRUE(kasan_flag_stacktrace);
enum kasan_mode {
KASAN_MODE_SYNC,
@@ -28,11 +47,6 @@ static inline bool kasan_vmalloc_enabled(void)
return static_branch_likely(&kasan_flag_vmalloc);
}
-static inline bool kasan_stack_collection_enabled(void)
-{
- return static_branch_unlikely(&kasan_flag_stacktrace);
-}
-
static inline bool kasan_async_fault_possible(void)
{
return kasan_mode == KASAN_MODE_ASYNC || kasan_mode == KASAN_MODE_ASYMM;
@@ -43,12 +57,7 @@ static inline bool kasan_sync_fault_possible(void)
return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
}
-#else
-
-static inline bool kasan_stack_collection_enabled(void)
-{
- return true;
-}
+#else /* CONFIG_KASAN_HW_TAGS */
static inline bool kasan_async_fault_possible(void)
{
@@ -60,7 +69,31 @@ static inline bool kasan_sync_fault_possible(void)
return true;
}
-#endif
+#endif /* CONFIG_KASAN_HW_TAGS */
+
+#ifdef CONFIG_KASAN_GENERIC
+
+/* Generic KASAN uses per-object metadata to store stack traces. */
+static inline bool kasan_requires_meta(void)
+{
+ /*
+ * Technically, Generic KASAN always collects stack traces right now.
+ * However, let's use kasan_stack_collection_enabled() in case the
+ * kasan.stacktrace command-line argument is changed to affect
+ * Generic KASAN.
+ */
+ return kasan_stack_collection_enabled();
+}
+
+#else /* CONFIG_KASAN_GENERIC */
+
+/* Tag-based KASAN modes do not use per-object metadata. */
+static inline bool kasan_requires_meta(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_KASAN_GENERIC */
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
@@ -122,6 +155,13 @@ static inline bool kasan_sync_fault_possible(void)
#define META_MEM_BYTES_PER_ROW (META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
#define META_ROWS_AROUND_ADDR 2
+#define KASAN_STACK_DEPTH 64
+
+struct kasan_track {
+ u32 pid;
+ depot_stack_handle_t stack;
+};
+
enum kasan_report_type {
KASAN_REPORT_ACCESS,
KASAN_REPORT_INVALID_FREE,
@@ -129,12 +169,22 @@ enum kasan_report_type {
};
struct kasan_report_info {
+ /* Filled in by kasan_report_*(). */
enum kasan_report_type type;
void *access_addr;
- void *first_bad_addr;
size_t access_size;
bool is_write;
unsigned long ip;
+
+ /* Filled in by the common reporting code. */
+ void *first_bad_addr;
+ struct kmem_cache *cache;
+ void *object;
+
+ /* Filled in by the mode-specific reporting code. */
+ const char *bug_type;
+ struct kasan_track alloc_track;
+ struct kasan_track free_track;
};
/* Do not change the struct layout: compiler ABI. */
@@ -160,33 +210,14 @@ struct kasan_global {
#endif
};
-/* Structures for keeping alloc and free tracks. */
+/* Structures for keeping alloc and free meta. */
-#define KASAN_STACK_DEPTH 64
-
-struct kasan_track {
- u32 pid;
- depot_stack_handle_t stack;
-};
-
-#if defined(CONFIG_KASAN_TAGS_IDENTIFY) && defined(CONFIG_KASAN_SW_TAGS)
-#define KASAN_NR_FREE_STACKS 5
-#else
-#define KASAN_NR_FREE_STACKS 1
-#endif
+#ifdef CONFIG_KASAN_GENERIC
struct kasan_alloc_meta {
struct kasan_track alloc_track;
- /* Generic mode stores free track in kasan_free_meta. */
-#ifdef CONFIG_KASAN_GENERIC
+ /* Free track is stored in kasan_free_meta. */
depot_stack_handle_t aux_stack[2];
-#else
- struct kasan_track free_track[KASAN_NR_FREE_STACKS];
-#endif
-#ifdef CONFIG_KASAN_TAGS_IDENTIFY
- u8 free_pointer_tag[KASAN_NR_FREE_STACKS];
- u8 free_track_idx;
-#endif
};
struct qlist_node {
@@ -205,12 +236,31 @@ struct qlist_node {
* After that, slab allocator stores the freelist pointer in the object.
*/
struct kasan_free_meta {
-#ifdef CONFIG_KASAN_GENERIC
struct qlist_node quarantine_link;
struct kasan_track free_track;
-#endif
};
+#endif /* CONFIG_KASAN_GENERIC */
+
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+
+struct kasan_stack_ring_entry {
+ void *ptr;
+ size_t size;
+ u32 pid;
+ depot_stack_handle_t stack;
+ bool is_free;
+};
+
+struct kasan_stack_ring {
+ rwlock_t lock;
+ size_t size;
+ atomic64_t pos;
+ struct kasan_stack_ring_entry *entries;
+};
+
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
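The producer side of this ring lives in the tag-based mode code, which is not part of this excerpt. To make the acquire/release pairing used by the reporting path below concrete, here is a deliberately simplified sketch of what such a save helper has to do (hypothetical code; the real implementation must additionally cope with two writers racing for the same slot):

	static void save_stack_info(struct kmem_cache *cache, void *object,
				    gfp_t flags, bool is_free)
	{
		unsigned long irq_flags;
		u64 pos;
		struct kasan_stack_ring_entry *entry;
		depot_stack_handle_t stack = kasan_save_stack(flags, true);

		/* Writers take the lock shared; the report path takes it exclusive. */
		read_lock_irqsave(&stack_ring.lock, irq_flags);

		pos = atomic64_fetch_add(1, &stack_ring.pos);	/* claim the next slot */
		entry = &stack_ring.entries[pos % stack_ring.size];

		WRITE_ONCE(entry->size, cache->object_size);
		WRITE_ONCE(entry->pid, current->pid);
		WRITE_ONCE(entry->stack, stack);
		WRITE_ONCE(entry->is_free, is_free);

		/*
		 * Publish ->ptr last: pairs with the smp_load_acquire() in
		 * kasan_complete_mode_report_info(), so a reader that matches
		 * ->ptr also observes the fields written above.
		 */
		smp_store_release(&entry->ptr, object);

		read_unlock_irqrestore(&stack_ring.lock, irq_flags);
	}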
+
#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
/* Used in KUnit-compatible KASAN tests. */
struct kunit_kasan_status {
@@ -219,13 +269,6 @@ struct kunit_kasan_status {
};
#endif
-struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
- const void *object);
-#ifdef CONFIG_KASAN_GENERIC
-struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
- const void *object);
-#endif
-
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
@@ -260,34 +303,50 @@ static inline bool addr_has_metadata(const void *addr)
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
+void *kasan_find_first_bad_addr(void *addr, size_t size);
+void kasan_complete_mode_report_info(struct kasan_report_info *info);
+void kasan_metadata_fetch_row(char *buffer, void *row);
+
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
void kasan_print_tags(u8 addr_tag, const void *addr);
#else
static inline void kasan_print_tags(u8 addr_tag, const void *addr) { }
#endif
-void *kasan_find_first_bad_addr(void *addr, size_t size);
-const char *kasan_get_bug_type(struct kasan_report_info *info);
-void kasan_metadata_fetch_row(char *buffer, void *row);
-
#if defined(CONFIG_KASAN_STACK)
void kasan_print_address_stack_frame(const void *addr);
#else
static inline void kasan_print_address_stack_frame(const void *addr) { }
#endif
+#ifdef CONFIG_KASAN_GENERIC
+void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object);
+#else
+static inline void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object) { }
+#endif
+
bool kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);
void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report_type type);
-struct page *kasan_addr_to_page(const void *addr);
struct slab *kasan_addr_to_slab(const void *addr);
+#ifdef CONFIG_KASAN_GENERIC
+void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size);
+void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
+struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
+ const void *object);
+struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
+ const void *object);
+#else
+static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size) { }
+static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
+#endif
+
depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
void kasan_set_track(struct kasan_track *track, gfp_t flags);
-void kasan_set_free_info(struct kmem_cache *cache, void *object, u8 tag);
-struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
- void *object, u8 tag);
+void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);
+void kasan_save_free_info(struct kmem_cache *cache, void *object);
#if defined(CONFIG_KASAN_GENERIC) && \
(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
@@ -358,6 +417,10 @@ static inline void kasan_enable_tagging(void) { }
#endif /* CONFIG_KASAN_HW_TAGS */
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+void __init kasan_init_tags(void);
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
+
#if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
void kasan_force_async_fault(void);
diff --git a/lib/test_kasan.c b/mm/kasan/kasan_test.c
index 58c1b01ccfe2..f25692def781 100644
--- a/lib/test_kasan.c
+++ b/mm/kasan/kasan_test.c
@@ -25,7 +25,7 @@
#include <kunit/test.h>
-#include "../mm/kasan/kasan.h"
+#include "kasan.h"
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
@@ -612,6 +612,29 @@ again:
kfree(ptr2);
}
+/*
+ * Check that KASAN detects use-after-free when another object was allocated in
+ * the same slot. Relevant for the tag-based modes, which do not use quarantine.
+ */
+static void kmalloc_uaf3(struct kunit *test)
+{
+ char *ptr1, *ptr2;
+ size_t size = 100;
+
+ /* This test is specifically crafted for tag-based modes. */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
+
+ ptr1 = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+ kfree(ptr1);
+
+ ptr2 = kmalloc(size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+ kfree(ptr2);
+
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
+}
+
static void kfree_via_page(struct kunit *test)
{
char *ptr;
@@ -1382,6 +1405,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
KUNIT_CASE(kmalloc_uaf),
KUNIT_CASE(kmalloc_uaf_memset),
KUNIT_CASE(kmalloc_uaf2),
+ KUNIT_CASE(kmalloc_uaf3),
KUNIT_CASE(kfree_via_page),
KUNIT_CASE(kfree_via_phys),
KUNIT_CASE(kmem_cache_oob),
diff --git a/lib/test_kasan_module.c b/mm/kasan/kasan_test_module.c
index b112cbc835e9..e4ca82dc2c16 100644
--- a/lib/test_kasan_module.c
+++ b/mm/kasan/kasan_test_module.c
@@ -13,7 +13,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include "../mm/kasan/kasan.h"
+#include "kasan.h"
static noinline void __init copy_user_test(void)
{
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index fe3f606b3a98..df3602062bfd 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -175,18 +175,14 @@ static void end_report(unsigned long *flags, void *addr)
static void print_error_description(struct kasan_report_info *info)
{
- if (info->type == KASAN_REPORT_INVALID_FREE) {
- pr_err("BUG: KASAN: invalid-free in %pS\n", (void *)info->ip);
- return;
- }
+ pr_err("BUG: KASAN: %s in %pS\n", info->bug_type, (void *)info->ip);
- if (info->type == KASAN_REPORT_DOUBLE_FREE) {
- pr_err("BUG: KASAN: double-free in %pS\n", (void *)info->ip);
+ if (info->type != KASAN_REPORT_ACCESS) {
+ pr_err("Free of addr %px by task %s/%d\n",
+ info->access_addr, current->comm, task_pid_nr(current));
return;
}
- pr_err("BUG: KASAN: %s in %pS\n",
- kasan_get_bug_type(info), (void *)info->ip);
if (info->access_size)
pr_err("%s of size %zu at addr %px by task %s/%d\n",
info->is_write ? "Write" : "Read", info->access_size,
@@ -200,31 +196,21 @@ static void print_error_description(struct kasan_report_info *info)
static void print_track(struct kasan_track *track, const char *prefix)
{
pr_err("%s by task %u:\n", prefix, track->pid);
- if (track->stack) {
+ if (track->stack)
stack_depot_print(track->stack);
- } else {
+ else
pr_err("(stack is not available)\n");
- }
}
-struct page *kasan_addr_to_page(const void *addr)
+static inline struct page *addr_to_page(const void *addr)
{
- if ((addr >= (void *)PAGE_OFFSET) &&
- (addr < high_memory))
+ if (virt_addr_valid(addr))
return virt_to_head_page(addr);
return NULL;
}
-struct slab *kasan_addr_to_slab(const void *addr)
-{
- if ((addr >= (void *)PAGE_OFFSET) &&
- (addr < high_memory))
- return virt_to_slab(addr);
- return NULL;
-}
-
-static void describe_object_addr(struct kmem_cache *cache, void *object,
- const void *addr)
+static void describe_object_addr(const void *addr, struct kmem_cache *cache,
+ void *object)
{
unsigned long access_addr = (unsigned long)addr;
unsigned long object_addr = (unsigned long)object;
@@ -252,46 +238,26 @@ static void describe_object_addr(struct kmem_cache *cache, void *object,
(void *)(object_addr + cache->object_size));
}
-static void describe_object_stacks(struct kmem_cache *cache, void *object,
- const void *addr, u8 tag)
+static void describe_object_stacks(struct kasan_report_info *info)
{
- struct kasan_alloc_meta *alloc_meta;
- struct kasan_track *free_track;
-
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (alloc_meta) {
- print_track(&alloc_meta->alloc_track, "Allocated");
+ if (info->alloc_track.stack) {
+ print_track(&info->alloc_track, "Allocated");
pr_err("\n");
}
- free_track = kasan_get_free_track(cache, object, tag);
- if (free_track) {
- print_track(free_track, "Freed");
+ if (info->free_track.stack) {
+ print_track(&info->free_track, "Freed");
pr_err("\n");
}
-#ifdef CONFIG_KASAN_GENERIC
- if (!alloc_meta)
- return;
- if (alloc_meta->aux_stack[0]) {
- pr_err("Last potentially related work creation:\n");
- stack_depot_print(alloc_meta->aux_stack[0]);
- pr_err("\n");
- }
- if (alloc_meta->aux_stack[1]) {
- pr_err("Second to last potentially related work creation:\n");
- stack_depot_print(alloc_meta->aux_stack[1]);
- pr_err("\n");
- }
-#endif
+ kasan_print_aux_stacks(info->cache, info->object);
}
-static void describe_object(struct kmem_cache *cache, void *object,
- const void *addr, u8 tag)
+static void describe_object(const void *addr, struct kasan_report_info *info)
{
if (kasan_stack_collection_enabled())
- describe_object_stacks(cache, object, addr, tag);
- describe_object_addr(cache, object, addr);
+ describe_object_stacks(info);
+ describe_object_addr(addr, info->cache, info->object);
}
static inline bool kernel_or_module_addr(const void *addr)
@@ -310,19 +276,16 @@ static inline bool init_task_stack_addr(const void *addr)
sizeof(init_thread_union.stack));
}
-static void print_address_description(void *addr, u8 tag)
+static void print_address_description(void *addr, u8 tag,
+ struct kasan_report_info *info)
{
- struct page *page = kasan_addr_to_page(addr);
+ struct page *page = addr_to_page(addr);
dump_stack_lvl(KERN_ERR);
pr_err("\n");
- if (page && PageSlab(page)) {
- struct slab *slab = page_slab(page);
- struct kmem_cache *cache = slab->slab_cache;
- void *object = nearest_obj(cache, slab, addr);
-
- describe_object(cache, object, addr, tag);
+ if (info->cache && info->object) {
+ describe_object(addr, info);
pr_err("\n");
}
@@ -420,23 +383,56 @@ static void print_memory_metadata(const void *addr)
static void print_report(struct kasan_report_info *info)
{
- void *tagged_addr = info->access_addr;
- void *untagged_addr = kasan_reset_tag(tagged_addr);
- u8 tag = get_tag(tagged_addr);
+ void *addr = kasan_reset_tag(info->access_addr);
+ u8 tag = get_tag(info->access_addr);
print_error_description(info);
- if (addr_has_metadata(untagged_addr))
+ if (addr_has_metadata(addr))
kasan_print_tags(tag, info->first_bad_addr);
pr_err("\n");
- if (addr_has_metadata(untagged_addr)) {
- print_address_description(untagged_addr, tag);
+ if (addr_has_metadata(addr)) {
+ print_address_description(addr, tag, info);
print_memory_metadata(info->first_bad_addr);
} else {
dump_stack_lvl(KERN_ERR);
}
}
+static void complete_report_info(struct kasan_report_info *info)
+{
+ void *addr = kasan_reset_tag(info->access_addr);
+ struct slab *slab;
+
+ if (info->type == KASAN_REPORT_ACCESS)
+ info->first_bad_addr = kasan_find_first_bad_addr(
+ info->access_addr, info->access_size);
+ else
+ info->first_bad_addr = addr;
+
+ slab = kasan_addr_to_slab(addr);
+ if (slab) {
+ info->cache = slab->slab_cache;
+ info->object = nearest_obj(info->cache, slab, addr);
+ } else
+ info->cache = info->object = NULL;
+
+ switch (info->type) {
+ case KASAN_REPORT_INVALID_FREE:
+ info->bug_type = "invalid-free";
+ break;
+ case KASAN_REPORT_DOUBLE_FREE:
+ info->bug_type = "double-free";
+ break;
+ default:
+ /* bug_type filled in by kasan_complete_mode_report_info. */
+ break;
+ }
+
+ /* Fill in mode-specific report info fields. */
+ kasan_complete_mode_report_info(info);
+}
+
void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_type type)
{
unsigned long flags;
@@ -452,13 +448,15 @@ void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_ty
start_report(&flags, true);
+ memset(&info, 0, sizeof(info));
info.type = type;
info.access_addr = ptr;
- info.first_bad_addr = kasan_reset_tag(ptr);
info.access_size = 0;
info.is_write = false;
info.ip = ip;
+ complete_report_info(&info);
+
print_report(&info);
end_report(&flags, ptr);
@@ -485,13 +483,15 @@ bool kasan_report(unsigned long addr, size_t size, bool is_write,
start_report(&irq_flags, true);
+ memset(&info, 0, sizeof(info));
info.type = KASAN_REPORT_ACCESS;
info.access_addr = ptr;
- info.first_bad_addr = kasan_find_first_bad_addr(ptr, size);
info.access_size = size;
info.is_write = is_write;
info.ip = ip;
+ complete_report_info(&info);
+
print_report(&info);
end_report(&irq_flags, ptr);
diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c
index 6689fb9a919b..043c94b04605 100644
--- a/mm/kasan/report_generic.c
+++ b/mm/kasan/report_generic.c
@@ -109,7 +109,7 @@ static const char *get_wild_bug_type(struct kasan_report_info *info)
return bug_type;
}
-const char *kasan_get_bug_type(struct kasan_report_info *info)
+static const char *get_bug_type(struct kasan_report_info *info)
{
/*
* If access_size is a negative number, then it has reason to be
@@ -127,11 +127,55 @@ const char *kasan_get_bug_type(struct kasan_report_info *info)
return get_wild_bug_type(info);
}
+void kasan_complete_mode_report_info(struct kasan_report_info *info)
+{
+ struct kasan_alloc_meta *alloc_meta;
+ struct kasan_free_meta *free_meta;
+
+ if (!info->bug_type)
+ info->bug_type = get_bug_type(info);
+
+ if (!info->cache || !info->object)
+ return;
+
+ alloc_meta = kasan_get_alloc_meta(info->cache, info->object);
+ if (alloc_meta)
+ memcpy(&info->alloc_track, &alloc_meta->alloc_track,
+ sizeof(info->alloc_track));
+
+ if (*(u8 *)kasan_mem_to_shadow(info->object) == KASAN_SLAB_FREETRACK) {
+ /* Free meta must be present with KASAN_SLAB_FREETRACK. */
+ free_meta = kasan_get_free_meta(info->cache, info->object);
+ memcpy(&info->free_track, &free_meta->free_track,
+ sizeof(info->free_track));
+ }
+}
+
void kasan_metadata_fetch_row(char *buffer, void *row)
{
memcpy(buffer, kasan_mem_to_shadow(row), META_BYTES_PER_ROW);
}
+void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object)
+{
+ struct kasan_alloc_meta *alloc_meta;
+
+ alloc_meta = kasan_get_alloc_meta(cache, object);
+ if (!alloc_meta)
+ return;
+
+ if (alloc_meta->aux_stack[0]) {
+ pr_err("Last potentially related work creation:\n");
+ stack_depot_print(alloc_meta->aux_stack[0]);
+ pr_err("\n");
+ }
+ if (alloc_meta->aux_stack[1]) {
+ pr_err("Second to last potentially related work creation:\n");
+ stack_depot_print(alloc_meta->aux_stack[1]);
+ pr_err("\n");
+ }
+}
+
#ifdef CONFIG_KASAN_STACK
static bool __must_check tokenize_frame_descr(const char **frame_descr,
char *token, size_t max_tok_len,
diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
index e25d2166e813..ecede06ef374 100644
--- a/mm/kasan/report_tags.c
+++ b/mm/kasan/report_tags.c
@@ -4,38 +4,14 @@
* Copyright (c) 2020 Google, Inc.
*/
+#include <linux/atomic.h>
+
#include "kasan.h"
-#include "../slab.h"
-const char *kasan_get_bug_type(struct kasan_report_info *info)
-{
-#ifdef CONFIG_KASAN_TAGS_IDENTIFY
- struct kasan_alloc_meta *alloc_meta;
- struct kmem_cache *cache;
- struct slab *slab;
- const void *addr;
- void *object;
- u8 tag;
- int i;
-
- tag = get_tag(info->access_addr);
- addr = kasan_reset_tag(info->access_addr);
- slab = kasan_addr_to_slab(addr);
- if (slab) {
- cache = slab->slab_cache;
- object = nearest_obj(cache, slab, (void *)addr);
- alloc_meta = kasan_get_alloc_meta(cache, object);
-
- if (alloc_meta) {
- for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
- if (alloc_meta->free_pointer_tag[i] == tag)
- return "use-after-free";
- }
- }
- return "out-of-bounds";
- }
-#endif
+extern struct kasan_stack_ring stack_ring;
+static const char *get_common_bug_type(struct kasan_report_info *info)
+{
/*
* If access_size is a negative number, then it has reason to be
* defined as out-of-bounds bug type.
@@ -49,3 +25,92 @@ const char *kasan_get_bug_type(struct kasan_report_info *info)
return "invalid-access";
}
+
+void kasan_complete_mode_report_info(struct kasan_report_info *info)
+{
+ unsigned long flags;
+ u64 pos;
+ struct kasan_stack_ring_entry *entry;
+ void *ptr;
+ u32 pid;
+ depot_stack_handle_t stack;
+ bool is_free;
+ bool alloc_found = false, free_found = false;
+
+ if ((!info->cache || !info->object) && !info->bug_type) {
+ info->bug_type = get_common_bug_type(info);
+ return;
+ }
+
+ write_lock_irqsave(&stack_ring.lock, flags);
+
+ pos = atomic64_read(&stack_ring.pos);
+
+ /*
+ * The loop below tries to find stack ring entries relevant to the
+ * buggy object. This is a best-effort process.
+ *
+ * First, another object with the same tag can be allocated in place of
+ * the buggy object. Also, since the number of entries is limited, the
+ * entries relevant to the buggy object can be overwritten.
+ */
+
+ for (u64 i = pos - 1; i != pos - 1 - stack_ring.size; i--) {
+ if (alloc_found && free_found)
+ break;
+
+ entry = &stack_ring.entries[i % stack_ring.size];
+
+ /* Paired with smp_store_release() in save_stack_info(). */
+ ptr = (void *)smp_load_acquire(&entry->ptr);
+
+ if (kasan_reset_tag(ptr) != info->object ||
+ get_tag(ptr) != get_tag(info->access_addr))
+ continue;
+
+ pid = READ_ONCE(entry->pid);
+ stack = READ_ONCE(entry->stack);
+ is_free = READ_ONCE(entry->is_free);
+
+ if (is_free) {
+ /*
+ * Second free of the same object.
+ * Give up on trying to find the alloc entry.
+ */
+ if (free_found)
+ break;
+
+ info->free_track.pid = pid;
+ info->free_track.stack = stack;
+ free_found = true;
+
+ /*
+ * If a free entry is found first, the bug is likely
+ * a use-after-free.
+ */
+ if (!info->bug_type)
+ info->bug_type = "use-after-free";
+ } else {
+ /* Second alloc of the same object. Give up. */
+ if (alloc_found)
+ break;
+
+ info->alloc_track.pid = pid;
+ info->alloc_track.stack = stack;
+ alloc_found = true;
+
+ /*
+ * If an alloc entry is found first, the bug is likely
+ * an out-of-bounds.
+ */
+ if (!info->bug_type)
+ info->bug_type = "slab-out-of-bounds";
+ }
+ }
+
+ write_unlock_irqrestore(&stack_ring.lock, flags);
+
+ /* Assign the common bug type if no entries were found. */
+ if (!info->bug_type)
+ info->bug_type = get_common_bug_type(info);
+}
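
For illustration only (not part of the patch): the lookup added to kasan_complete_mode_report_info() above boils down to walking the ring backwards from the current write position and keeping the newest free and alloc entries that match the object. A simplified userspace sketch, with tag matching, the busy-slot marker and the second-hit cutoffs omitted, and the entry layout reduced to the essentials:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct ring_entry {
	void *ptr;		/* object this stack was saved for */
	uint32_t pid;
	uint32_t stack;		/* stand-in for a stack depot handle */
	bool is_free;
};

struct track {
	uint32_t pid;
	uint32_t stack;
};

/*
 * Walk the ring newest-first, starting just before the write position
 * 'pos', and record the most recent alloc and free entries for 'object'.
 * Returns true if at least one matching entry was found.
 */
static bool find_tracks(const struct ring_entry *ring, size_t size,
			uint64_t pos, const void *object,
			struct track *alloc, struct track *free_track)
{
	bool alloc_found = false, free_found = false;
	size_t idx = pos % size;

	for (size_t n = 0; n < size && !(alloc_found && free_found); n++) {
		const struct ring_entry *e;

		idx = (idx + size - 1) % size;	/* step backwards */
		e = &ring[idx];
		if (e->ptr != object)
			continue;

		if (e->is_free && !free_found) {
			/* A free entry seen first suggests use-after-free. */
			free_track->pid = e->pid;
			free_track->stack = e->stack;
			free_found = true;
		} else if (!e->is_free && !alloc_found) {
			/* An alloc entry seen first suggests out-of-bounds. */
			alloc->pid = e->pid;
			alloc->stack = e->stack;
			alloc_found = true;
		}
	}
	return alloc_found || free_found;
}
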
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index 77f13f391b57..a3afaf2ad1b1 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -42,7 +42,10 @@ void __init kasan_init_sw_tags(void)
for_each_possible_cpu(cpu)
per_cpu(prng_state, cpu) = (u32)get_cycles();
- pr_info("KernelAddressSanitizer initialized (sw-tags)\n");
+ kasan_init_tags();
+
+ pr_info("KernelAddressSanitizer initialized (sw-tags, stacktrace=%s)\n",
+ kasan_stack_collection_enabled() ? "on" : "off");
}
/*
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c
index 8f48b9502a17..67a222586846 100644
--- a/mm/kasan/tags.c
+++ b/mm/kasan/tags.c
@@ -6,9 +6,11 @@
* Copyright (c) 2020 Google, Inc.
*/
+#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/static_key.h>
@@ -16,44 +18,127 @@
#include <linux/types.h>
#include "kasan.h"
+#include "../slab.h"
-void kasan_set_free_info(struct kmem_cache *cache,
- void *object, u8 tag)
-{
- struct kasan_alloc_meta *alloc_meta;
- u8 idx = 0;
+#define KASAN_STACK_RING_SIZE_DEFAULT (32 << 10)
+
+enum kasan_arg_stacktrace {
+ KASAN_ARG_STACKTRACE_DEFAULT,
+ KASAN_ARG_STACKTRACE_OFF,
+ KASAN_ARG_STACKTRACE_ON,
+};
+
+static enum kasan_arg_stacktrace kasan_arg_stacktrace __initdata;
+
+/* Whether to collect alloc/free stack traces. */
+DEFINE_STATIC_KEY_TRUE(kasan_flag_stacktrace);
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (!alloc_meta)
- return;
+/* Non-zero, as initial pointer values are 0. */
+#define STACK_RING_BUSY_PTR ((void *)1)
+
+struct kasan_stack_ring stack_ring = {
+ .lock = __RW_LOCK_UNLOCKED(stack_ring.lock)
+};
+
+/* kasan.stacktrace=off/on */
+static int __init early_kasan_flag_stacktrace(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
-#ifdef CONFIG_KASAN_TAGS_IDENTIFY
- idx = alloc_meta->free_track_idx;
- alloc_meta->free_pointer_tag[idx] = tag;
- alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
-#endif
+ if (!strcmp(arg, "off"))
+ kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_OFF;
+ else if (!strcmp(arg, "on"))
+ kasan_arg_stacktrace = KASAN_ARG_STACKTRACE_ON;
+ else
+ return -EINVAL;
- kasan_set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
+ return 0;
}
+early_param("kasan.stacktrace", early_kasan_flag_stacktrace);
-struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
- void *object, u8 tag)
+/* kasan.stack_ring_size=<number of entries> */
+static int __init early_kasan_flag_stack_ring_size(char *arg)
{
- struct kasan_alloc_meta *alloc_meta;
- int i = 0;
+ if (!arg)
+ return -EINVAL;
- alloc_meta = kasan_get_alloc_meta(cache, object);
- if (!alloc_meta)
- return NULL;
+ return kstrtoul(arg, 0, &stack_ring.size);
+}
+early_param("kasan.stack_ring_size", early_kasan_flag_stack_ring_size);
+
+void __init kasan_init_tags(void)
+{
+ switch (kasan_arg_stacktrace) {
+ case KASAN_ARG_STACKTRACE_DEFAULT:
+ /* Default is specified by kasan_flag_stacktrace definition. */
+ break;
+ case KASAN_ARG_STACKTRACE_OFF:
+ static_branch_disable(&kasan_flag_stacktrace);
+ break;
+ case KASAN_ARG_STACKTRACE_ON:
+ static_branch_enable(&kasan_flag_stacktrace);
+ break;
+ }
-#ifdef CONFIG_KASAN_TAGS_IDENTIFY
- for (i = 0; i < KASAN_NR_FREE_STACKS; i++) {
- if (alloc_meta->free_pointer_tag[i] == tag)
- break;
+ if (kasan_stack_collection_enabled()) {
+ if (!stack_ring.size)
+ stack_ring.size = KASAN_STACK_RING_SIZE_DEFAULT;
+ stack_ring.entries = memblock_alloc(
+ sizeof(stack_ring.entries[0]) * stack_ring.size,
+ SMP_CACHE_BYTES);
+ if (WARN_ON(!stack_ring.entries))
+ static_branch_disable(&kasan_flag_stacktrace);
}
- if (i == KASAN_NR_FREE_STACKS)
- i = alloc_meta->free_track_idx;
-#endif
+}
+
+static void save_stack_info(struct kmem_cache *cache, void *object,
+ gfp_t gfp_flags, bool is_free)
+{
+ unsigned long flags;
+ depot_stack_handle_t stack;
+ u64 pos;
+ struct kasan_stack_ring_entry *entry;
+ void *old_ptr;
+
+ stack = kasan_save_stack(gfp_flags, true);
+
+ /*
+ * Prevent save_stack_info() from modifying the stack ring
+ * when kasan_complete_mode_report_info() is walking it.
+ */
+ read_lock_irqsave(&stack_ring.lock, flags);
+
+next:
+ pos = atomic64_fetch_add(1, &stack_ring.pos);
+ entry = &stack_ring.entries[pos % stack_ring.size];
+
+ /* Detect stack ring entry slots that are being written to. */
+ old_ptr = READ_ONCE(entry->ptr);
+ if (old_ptr == STACK_RING_BUSY_PTR)
+ goto next; /* Busy slot. */
+ if (!try_cmpxchg(&entry->ptr, &old_ptr, STACK_RING_BUSY_PTR))
+ goto next; /* Busy slot. */
+
+ WRITE_ONCE(entry->size, cache->object_size);
+ WRITE_ONCE(entry->pid, current->pid);
+ WRITE_ONCE(entry->stack, stack);
+ WRITE_ONCE(entry->is_free, is_free);
+
+ /*
+ * Paired with smp_load_acquire() in kasan_complete_mode_report_info().
+ */
+ smp_store_release(&entry->ptr, (s64)object);
- return &alloc_meta->free_track[i];
+ read_unlock_irqrestore(&stack_ring.lock, flags);
+}
+
+void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
+{
+ save_stack_info(cache, object, flags, false);
+}
+
+void kasan_save_free_info(struct kmem_cache *cache, void *object)
+{
+ save_stack_info(cache, object, GFP_NOWAIT, true);
}
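
On the producer side, save_stack_info() above claims a slot by bumping an atomic position counter and swapping the slot's pointer to a reserved busy marker before filling it, then publishes the slot with a release store. A rough userspace analogue using C11 atomics (again only an illustration; the types, the ring size and the absence of the reader-side lock are simplifications):

#include <stdatomic.h>
#include <stdint.h>

#define RING_SIZE 1024
#define BUSY ((void *)1)	/* non-NULL marker for a slot being written */

struct slot {
	_Atomic(void *) ptr;
	uint32_t pid;
	uint32_t stack;
	_Bool is_free;
};

static struct slot ring[RING_SIZE];
static _Atomic uint64_t ring_pos;

static void save_entry(void *object, uint32_t pid, uint32_t stack, _Bool is_free)
{
	for (;;) {
		uint64_t pos = atomic_fetch_add(&ring_pos, 1);
		struct slot *s = &ring[pos % RING_SIZE];
		void *old = atomic_load_explicit(&s->ptr, memory_order_relaxed);

		/* Skip slots another writer is currently filling. */
		if (old == BUSY)
			continue;
		if (!atomic_compare_exchange_strong(&s->ptr, &old, BUSY))
			continue;

		s->pid = pid;
		s->stack = stack;
		s->is_free = is_free;
		/* Publish: pairs with an acquire load on the reader side. */
		atomic_store_explicit(&s->ptr, object, memory_order_release);
		return;
	}
}

The two new early parameters would then appear on the kernel command line as, for example, kasan.stacktrace=off or kasan.stack_ring_size=65536; 65536 is only an illustrative value, the default being 32768 entries.
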
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 239b1b4b094f..141788858b70 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -719,24 +719,13 @@ static int show_object(struct seq_file *seq, void *v)
return 0;
}
-static const struct seq_operations object_seqops = {
+static const struct seq_operations objects_sops = {
.start = start_object,
.next = next_object,
.stop = stop_object,
.show = show_object,
};
-
-static int open_objects(struct inode *inode, struct file *file)
-{
- return seq_open(file, &object_seqops);
-}
-
-static const struct file_operations objects_fops = {
- .open = open_objects,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(objects);
static int __init kfence_debugfs_init(void)
{
@@ -1003,6 +992,13 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
return NULL;
}
+ /*
+ * Skip allocations from this slab if KFENCE has been
+ * disabled for it.
+ */
+ if (s->flags & SLAB_SKIP_KFENCE)
+ return NULL;
+
if (atomic_inc_return(&kfence_allocation_gate) > 1)
return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
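
DEFINE_SEQ_ATTRIBUTE(objects) stands in for the open/read/llseek/release boilerplate deleted above. Roughly -- this is a simplified sketch of the expansion, not the exact macro body from include/linux/seq_file.h -- it generates:

static int objects_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &objects_sops);
}

static const struct file_operations objects_fops = {
	.owner   = THIS_MODULE,
	.open    = objects_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
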
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 01dbc6dbd599..4734315f7940 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -23,16 +23,20 @@
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
+#include "mm_slot.h"
enum scan_result {
SCAN_FAIL,
SCAN_SUCCEED,
SCAN_PMD_NULL,
+ SCAN_PMD_NONE,
+ SCAN_PMD_MAPPED,
SCAN_EXCEED_NONE_PTE,
SCAN_EXCEED_SWAP_PTE,
SCAN_EXCEED_SHARED_PTE,
SCAN_PTE_NON_PRESENT,
SCAN_PTE_UFFD_WP,
+ SCAN_PTE_MAPPED_HUGEPAGE,
SCAN_PAGE_RO,
SCAN_LACK_REFERENCED_PAGE,
SCAN_PAGE_NULL,
@@ -73,6 +77,8 @@ static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
* default collapse hugepages if there is at least one pte mapped like
* it would have happened if the vma was large enough during page
* fault.
+ *
+ * Note that these are only respected if collapse was initiated by khugepaged.
*/
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;
@@ -85,18 +91,24 @@ static struct kmem_cache *mm_slot_cache __read_mostly;
#define MAX_PTE_MAPPED_THP 8
+struct collapse_control {
+ bool is_khugepaged;
+
+ /* Num pages scanned per node */
+ u32 node_load[MAX_NUMNODES];
+
+ /* Last target selected in hpage_collapse_find_target_node() */
+ int last_target_node;
+};
+
/**
- * struct mm_slot - hash lookup from mm to mm_slot
- * @hash: hash collision list
- * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
- * @mm: the mm that this information is valid for
+ * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
+ * @slot: hash lookup from mm to mm_slot
* @nr_pte_mapped_thp: number of pte mapped THP
* @pte_mapped_thp: address array corresponding pte mapped THP
*/
-struct mm_slot {
- struct hlist_node hash;
- struct list_head mm_node;
- struct mm_struct *mm;
+struct khugepaged_mm_slot {
+ struct mm_slot slot;
/* pte-mapped THP in this mm */
int nr_pte_mapped_thp;
@@ -113,7 +125,7 @@ struct mm_slot {
*/
struct khugepaged_scan {
struct list_head mm_head;
- struct mm_slot *mm_slot;
+ struct khugepaged_mm_slot *mm_slot;
unsigned long address;
};
@@ -377,8 +389,9 @@ int hugepage_madvise(struct vm_area_struct *vma,
int __init khugepaged_init(void)
{
mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
- sizeof(struct mm_slot),
- __alignof__(struct mm_slot), 0, NULL);
+ sizeof(struct khugepaged_mm_slot),
+ __alignof__(struct khugepaged_mm_slot),
+ 0, NULL);
if (!mm_slot_cache)
return -ENOMEM;
@@ -395,65 +408,38 @@ void __init khugepaged_destroy(void)
kmem_cache_destroy(mm_slot_cache);
}
-static inline struct mm_slot *alloc_mm_slot(void)
-{
- if (!mm_slot_cache) /* initialization failed */
- return NULL;
- return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
-}
-
-static inline void free_mm_slot(struct mm_slot *mm_slot)
-{
- kmem_cache_free(mm_slot_cache, mm_slot);
-}
-
-static struct mm_slot *get_mm_slot(struct mm_struct *mm)
-{
- struct mm_slot *mm_slot;
-
- hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
- if (mm == mm_slot->mm)
- return mm_slot;
-
- return NULL;
-}
-
-static void insert_to_mm_slots_hash(struct mm_struct *mm,
- struct mm_slot *mm_slot)
-{
- mm_slot->mm = mm;
- hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
-}
-
-static inline int khugepaged_test_exit(struct mm_struct *mm)
+static inline int hpage_collapse_test_exit(struct mm_struct *mm)
{
return atomic_read(&mm->mm_users) == 0;
}
void __khugepaged_enter(struct mm_struct *mm)
{
- struct mm_slot *mm_slot;
+ struct khugepaged_mm_slot *mm_slot;
+ struct mm_slot *slot;
int wakeup;
- mm_slot = alloc_mm_slot();
+ mm_slot = mm_slot_alloc(mm_slot_cache);
if (!mm_slot)
return;
+ slot = &mm_slot->slot;
+
/* __khugepaged_exit() must not run from under us */
- VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
+ VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm);
if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
- free_mm_slot(mm_slot);
+ mm_slot_free(mm_slot_cache, mm_slot);
return;
}
spin_lock(&khugepaged_mm_lock);
- insert_to_mm_slots_hash(mm, mm_slot);
+ mm_slot_insert(mm_slots_hash, mm, slot);
/*
* Insert just behind the scanning cursor, to let the area settle
* down a little.
*/
wakeup = list_empty(&khugepaged_scan.mm_head);
- list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
+ list_add_tail(&slot->mm_node, &khugepaged_scan.mm_head);
spin_unlock(&khugepaged_mm_lock);
mmgrab(mm);
@@ -466,37 +452,38 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
hugepage_flags_enabled()) {
- if (hugepage_vma_check(vma, vm_flags, false, false))
+ if (hugepage_vma_check(vma, vm_flags, false, false, true))
__khugepaged_enter(vma->vm_mm);
}
}
void __khugepaged_exit(struct mm_struct *mm)
{
- struct mm_slot *mm_slot;
+ struct khugepaged_mm_slot *mm_slot;
+ struct mm_slot *slot;
int free = 0;
spin_lock(&khugepaged_mm_lock);
- mm_slot = get_mm_slot(mm);
+ slot = mm_slot_lookup(mm_slots_hash, mm);
+ mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
- hash_del(&mm_slot->hash);
- list_del(&mm_slot->mm_node);
+ hash_del(&slot->hash);
+ list_del(&slot->mm_node);
free = 1;
}
spin_unlock(&khugepaged_mm_lock);
if (free) {
clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
- free_mm_slot(mm_slot);
+ mm_slot_free(mm_slot_cache, mm_slot);
mmdrop(mm);
} else if (mm_slot) {
/*
* This is required to serialize against
- * khugepaged_test_exit() (which is guaranteed to run
- * under mmap sem read mode). Stop here (after we
- * return all pagetables will be destroyed) until
- * khugepaged has finished working on the pagetables
- * under the mmap_lock.
+ * hpage_collapse_test_exit() (which is guaranteed to run
+ * under mmap sem read mode). Stop here (after we return all
+ * pagetables will be destroyed) until khugepaged has finished
+ * working on the pagetables under the mmap_lock.
*/
mmap_write_lock(mm);
mmap_write_unlock(mm);
@@ -546,11 +533,12 @@ static bool is_refcount_suitable(struct page *page)
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
unsigned long address,
pte_t *pte,
+ struct collapse_control *cc,
struct list_head *compound_pagelist)
{
struct page *page = NULL;
pte_t *_pte;
- int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
+ int none_or_zero = 0, shared = 0, result = SCAN_FAIL, referenced = 0;
bool writable = false;
for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
@@ -558,8 +546,10 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
pte_t pteval = *_pte;
if (pte_none(pteval) || (pte_present(pteval) &&
is_zero_pfn(pte_pfn(pteval)))) {
+ ++none_or_zero;
if (!userfaultfd_armed(vma) &&
- ++none_or_zero <= khugepaged_max_ptes_none) {
+ (!cc->is_khugepaged ||
+ none_or_zero <= khugepaged_max_ptes_none)) {
continue;
} else {
result = SCAN_EXCEED_NONE_PTE;
@@ -579,11 +569,14 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
VM_BUG_ON_PAGE(!PageAnon(page), page);
- if (page_mapcount(page) > 1 &&
- ++shared > khugepaged_max_ptes_shared) {
- result = SCAN_EXCEED_SHARED_PTE;
- count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
- goto out;
+ if (page_mapcount(page) > 1) {
+ ++shared;
+ if (cc->is_khugepaged &&
+ shared > khugepaged_max_ptes_shared) {
+ result = SCAN_EXCEED_SHARED_PTE;
+ count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
+ goto out;
+ }
}
if (PageCompound(page)) {
@@ -646,10 +639,14 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
if (PageCompound(page))
list_add_tail(&page->lru, compound_pagelist);
next:
- /* There should be enough young pte to collapse the page */
- if (pte_young(pteval) ||
- page_is_young(page) || PageReferenced(page) ||
- mmu_notifier_test_young(vma->vm_mm, address))
+ /*
+ * If collapse was initiated by khugepaged, check that there is
+ * enough young pte to justify collapsing the page
+ */
+ if (cc->is_khugepaged &&
+ (pte_young(pteval) || page_is_young(page) ||
+ PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+ address)))
referenced++;
if (pte_write(pteval))
@@ -658,19 +655,19 @@ next:
if (unlikely(!writable)) {
result = SCAN_PAGE_RO;
- } else if (unlikely(!referenced)) {
+ } else if (unlikely(cc->is_khugepaged && !referenced)) {
result = SCAN_LACK_REFERENCED_PAGE;
} else {
result = SCAN_SUCCEED;
trace_mm_collapse_huge_page_isolate(page, none_or_zero,
referenced, writable, result);
- return 1;
+ return result;
}
out:
release_pte_pages(pte, _pte, compound_pagelist);
trace_mm_collapse_huge_page_isolate(page, none_or_zero,
referenced, writable, result);
- return 0;
+ return result;
}
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
@@ -735,9 +732,12 @@ static void khugepaged_alloc_sleep(void)
remove_wait_queue(&khugepaged_wait, &wait);
}
-static int khugepaged_node_load[MAX_NUMNODES];
+struct collapse_control khugepaged_collapse_control = {
+ .is_khugepaged = true,
+ .last_target_node = NUMA_NO_NODE,
+};
-static bool khugepaged_scan_abort(int nid)
+static bool hpage_collapse_scan_abort(int nid, struct collapse_control *cc)
{
int i;
@@ -749,11 +749,11 @@ static bool khugepaged_scan_abort(int nid)
return false;
/* If there is a count for this node already, it must be acceptable */
- if (khugepaged_node_load[nid])
+ if (cc->node_load[nid])
return false;
for (i = 0; i < MAX_NUMNODES; i++) {
- if (!khugepaged_node_load[i])
+ if (!cc->node_load[i])
continue;
if (node_distance(nid, i) > node_reclaim_distance)
return true;
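
The node_load[] bookkeeping that hpage_collapse_scan_abort() now reads from the per-collapse control structure implements a simple locality policy: once pages from some node have been counted for this candidate, a page on a node farther away than the reclaim distance aborts the scan. A self-contained sketch of that policy (the node count, distance matrix and threshold are invented for illustration):

#include <stdbool.h>

#define NR_NODES 4
#define RECLAIM_DISTANCE 30	/* illustrative threshold */

/* Illustrative symmetric node distance matrix. */
static const int node_distance[NR_NODES][NR_NODES] = {
	{ 10, 20, 40, 40 },
	{ 20, 10, 40, 40 },
	{ 40, 40, 10, 20 },
	{ 40, 40, 20, 10 },
};

static bool scan_abort(int nid, const unsigned int node_load[NR_NODES])
{
	/* A node already counted is always acceptable. */
	if (node_load[nid])
		return false;

	for (int i = 0; i < NR_NODES; i++) {
		if (!node_load[i])
			continue;
		if (node_distance[nid][i] > RECLAIM_DISTANCE)
			return true;	/* too far from pages already seen */
	}
	return false;
}
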
@@ -772,146 +772,63 @@ static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
}
#ifdef CONFIG_NUMA
-static int khugepaged_find_target_node(void)
+static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
- static int last_khugepaged_target_node = NUMA_NO_NODE;
int nid, target_node = 0, max_value = 0;
/* find first node with max normal pages hit */
for (nid = 0; nid < MAX_NUMNODES; nid++)
- if (khugepaged_node_load[nid] > max_value) {
- max_value = khugepaged_node_load[nid];
+ if (cc->node_load[nid] > max_value) {
+ max_value = cc->node_load[nid];
target_node = nid;
}
/* do some balance if several nodes have the same hit record */
- if (target_node <= last_khugepaged_target_node)
- for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
- nid++)
- if (max_value == khugepaged_node_load[nid]) {
+ if (target_node <= cc->last_target_node)
+ for (nid = cc->last_target_node + 1; nid < MAX_NUMNODES;
+ nid++)
+ if (max_value == cc->node_load[nid]) {
target_node = nid;
break;
}
- last_khugepaged_target_node = target_node;
+ cc->last_target_node = target_node;
return target_node;
}
-
-static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
+#else
+static int hpage_collapse_find_target_node(struct collapse_control *cc)
{
- if (IS_ERR(*hpage)) {
- if (!*wait)
- return false;
-
- *wait = false;
- *hpage = NULL;
- khugepaged_alloc_sleep();
- } else if (*hpage) {
- put_page(*hpage);
- *hpage = NULL;
- }
-
- return true;
+ return 0;
}
+#endif
-static struct page *
-khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
+static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
- VM_BUG_ON_PAGE(*hpage, *hpage);
-
*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
if (unlikely(!*hpage)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
- *hpage = ERR_PTR(-ENOMEM);
- return NULL;
+ return false;
}
prep_transhuge_page(*hpage);
count_vm_event(THP_COLLAPSE_ALLOC);
- return *hpage;
-}
-#else
-static int khugepaged_find_target_node(void)
-{
- return 0;
-}
-
-static inline struct page *alloc_khugepaged_hugepage(void)
-{
- struct page *page;
-
- page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
- HPAGE_PMD_ORDER);
- if (page)
- prep_transhuge_page(page);
- return page;
-}
-
-static struct page *khugepaged_alloc_hugepage(bool *wait)
-{
- struct page *hpage;
-
- do {
- hpage = alloc_khugepaged_hugepage();
- if (!hpage) {
- count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
- if (!*wait)
- return NULL;
-
- *wait = false;
- khugepaged_alloc_sleep();
- } else
- count_vm_event(THP_COLLAPSE_ALLOC);
- } while (unlikely(!hpage) && likely(hugepage_flags_enabled()));
-
- return hpage;
-}
-
-static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
-{
- /*
- * If the hpage allocated earlier was briefly exposed in page cache
- * before collapse_file() failed, it is possible that racing lookups
- * have not yet completed, and would then be unpleasantly surprised by
- * finding the hpage reused for the same mapping at a different offset.
- * Just release the previous allocation if there is any danger of that.
- */
- if (*hpage && page_count(*hpage) > 1) {
- put_page(*hpage);
- *hpage = NULL;
- }
-
- if (!*hpage)
- *hpage = khugepaged_alloc_hugepage(wait);
-
- if (unlikely(!*hpage))
- return false;
-
return true;
}
-static struct page *
-khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
-{
- VM_BUG_ON(!*hpage);
-
- return *hpage;
-}
-#endif
-
/*
* If mmap_lock temporarily dropped, revalidate vma
* before taking mmap_lock.
- * Return 0 if succeeds, otherwise return none-zero
- * value (scan code).
+ * Returns enum scan_result value.
*/
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
- struct vm_area_struct **vmap)
+ bool expect_anon,
+ struct vm_area_struct **vmap,
+ struct collapse_control *cc)
{
struct vm_area_struct *vma;
- if (unlikely(khugepaged_test_exit(mm)))
+ if (unlikely(hpage_collapse_test_exit(mm)))
return SCAN_ANY_PROCESS;
*vmap = vma = find_vma(mm, address);
@@ -920,7 +837,8 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
if (!transhuge_vma_suitable(vma, address))
return SCAN_ADDRESS_RANGE;
- if (!hugepage_vma_check(vma, vma->vm_flags, false, false))
+ if (!hugepage_vma_check(vma, vma->vm_flags, false, false,
+ cc->is_khugepaged))
return SCAN_VMA_CHECK;
/*
* Anon VMA expected, the address may be unmapped then
@@ -929,23 +847,62 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
* hugepage_vma_check may return true for qualified file
* vmas.
*/
- if (!vma->anon_vma || !vma_is_anonymous(vma))
- return SCAN_VMA_CHECK;
- return 0;
+ if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
+ return SCAN_PAGE_ANON;
+ return SCAN_SUCCEED;
+}
+
+static int find_pmd_or_thp_or_none(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t **pmd)
+{
+ pmd_t pmde;
+
+ *pmd = mm_find_pmd(mm, address);
+ if (!*pmd)
+ return SCAN_PMD_NULL;
+
+ pmde = pmd_read_atomic(*pmd);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /* See comments in pmd_none_or_trans_huge_or_clear_bad() */
+ barrier();
+#endif
+ if (pmd_none(pmde))
+ return SCAN_PMD_NONE;
+ if (pmd_trans_huge(pmde))
+ return SCAN_PMD_MAPPED;
+ if (pmd_bad(pmde))
+ return SCAN_PMD_NULL;
+ return SCAN_SUCCEED;
+}
+
+static int check_pmd_still_valid(struct mm_struct *mm,
+ unsigned long address,
+ pmd_t *pmd)
+{
+ pmd_t *new_pmd;
+ int result = find_pmd_or_thp_or_none(mm, address, &new_pmd);
+
+ if (result != SCAN_SUCCEED)
+ return result;
+ if (new_pmd != pmd)
+ return SCAN_FAIL;
+ return SCAN_SUCCEED;
}
/*
* Bring missing pages in from swap, to complete THP collapse.
- * Only done if khugepaged_scan_pmd believes it is worthwhile.
+ * Only done if hpage_collapse_scan_pmd believes it is worthwhile.
*
* Called and returns without pte mapped or spinlocks held.
* Note that if false is returned, mmap_lock will be released.
*/
-static bool __collapse_huge_page_swapin(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long haddr, pmd_t *pmd,
- int referenced)
+static int __collapse_huge_page_swapin(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long haddr, pmd_t *pmd,
+ int referenced)
{
int swapped_in = 0;
vm_fault_t ret = 0;
@@ -976,12 +933,13 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
*/
if (ret & VM_FAULT_RETRY) {
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
- return false;
+ /* Likely, but not guaranteed, that page lock failed */
+ return SCAN_PAGE_LOCK;
}
if (ret & VM_FAULT_ERROR) {
mmap_read_unlock(mm);
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
- return false;
+ return SCAN_FAIL;
}
swapped_in++;
}
@@ -991,30 +949,41 @@ static bool __collapse_huge_page_swapin(struct mm_struct *mm,
lru_add_drain();
trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
- return true;
+ return SCAN_SUCCEED;
}
-static void collapse_huge_page(struct mm_struct *mm,
- unsigned long address,
- struct page **hpage,
- int node, int referenced, int unmapped)
+static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
+ struct collapse_control *cc)
+{
+ /* Only allocate from the target node */
+ gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
+ GFP_TRANSHUGE) | __GFP_THISNODE;
+ int node = hpage_collapse_find_target_node(cc);
+
+ if (!hpage_collapse_alloc_page(hpage, gfp, node))
+ return SCAN_ALLOC_HUGE_PAGE_FAIL;
+ if (unlikely(mem_cgroup_charge(page_folio(*hpage), mm, gfp)))
+ return SCAN_CGROUP_CHARGE_FAIL;
+ count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
+ return SCAN_SUCCEED;
+}
+
+static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
+ int referenced, int unmapped,
+ struct collapse_control *cc)
{
LIST_HEAD(compound_pagelist);
pmd_t *pmd, _pmd;
pte_t *pte;
pgtable_t pgtable;
- struct page *new_page;
+ struct page *hpage;
spinlock_t *pmd_ptl, *pte_ptl;
- int isolated = 0, result = 0;
+ int result = SCAN_FAIL;
struct vm_area_struct *vma;
struct mmu_notifier_range range;
- gfp_t gfp;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
- /* Only allocate from the target node */
- gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
-
/*
* Before allocating the hugepage, release the mmap_lock read lock.
* The allocation can take potentially a long time if it involves
@@ -1022,40 +991,34 @@ static void collapse_huge_page(struct mm_struct *mm,
* that. We will recheck the vma after taking it again in write mode.
*/
mmap_read_unlock(mm);
- new_page = khugepaged_alloc_page(hpage, gfp, node);
- if (!new_page) {
- result = SCAN_ALLOC_HUGE_PAGE_FAIL;
- goto out_nolock;
- }
- if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
- result = SCAN_CGROUP_CHARGE_FAIL;
+ result = alloc_charge_hpage(&hpage, mm, cc);
+ if (result != SCAN_SUCCEED)
goto out_nolock;
- }
- count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
mmap_read_lock(mm);
- result = hugepage_vma_revalidate(mm, address, &vma);
- if (result) {
+ result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
+ if (result != SCAN_SUCCEED) {
mmap_read_unlock(mm);
goto out_nolock;
}
- pmd = mm_find_pmd(mm, address);
- if (!pmd) {
- result = SCAN_PMD_NULL;
+ result = find_pmd_or_thp_or_none(mm, address, &pmd);
+ if (result != SCAN_SUCCEED) {
mmap_read_unlock(mm);
goto out_nolock;
}
- /*
- * __collapse_huge_page_swapin will return with mmap_lock released
- * when it fails. So we jump out_nolock directly in that case.
- * Continuing to collapse causes inconsistency.
- */
- if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
- pmd, referenced)) {
- goto out_nolock;
+ if (unmapped) {
+ /*
+ * __collapse_huge_page_swapin will return with mmap_lock
+ * released when it fails. So we jump out_nolock directly in
+ * that case. Continuing to collapse causes inconsistency.
+ */
+ result = __collapse_huge_page_swapin(mm, vma, address, pmd,
+ referenced);
+ if (result != SCAN_SUCCEED)
+ goto out_nolock;
}
mmap_read_unlock(mm);
@@ -1065,11 +1028,12 @@ static void collapse_huge_page(struct mm_struct *mm,
* handled by the anon_vma lock + PG_lock.
*/
mmap_write_lock(mm);
- result = hugepage_vma_revalidate(mm, address, &vma);
- if (result)
+ result = hugepage_vma_revalidate(mm, address, true, &vma, cc);
+ if (result != SCAN_SUCCEED)
goto out_up_write;
/* check if the pmd is still valid */
- if (mm_find_pmd(mm, address) != pmd)
+ result = check_pmd_still_valid(mm, address, pmd);
+ if (result != SCAN_SUCCEED)
goto out_up_write;
anon_vma_lock_write(vma->anon_vma);
@@ -1095,11 +1059,11 @@ static void collapse_huge_page(struct mm_struct *mm,
mmu_notifier_invalidate_range_end(&range);
spin_lock(pte_ptl);
- isolated = __collapse_huge_page_isolate(vma, address, pte,
- &compound_pagelist);
+ result = __collapse_huge_page_isolate(vma, address, pte, cc,
+ &compound_pagelist);
spin_unlock(pte_ptl);
- if (unlikely(!isolated)) {
+ if (unlikely(result != SCAN_SUCCEED)) {
pte_unmap(pte);
spin_lock(pmd_ptl);
BUG_ON(!pmd_none(*pmd));
@@ -1111,7 +1075,6 @@ static void collapse_huge_page(struct mm_struct *mm,
pmd_populate(mm, pmd, pmd_pgtable(_pmd));
spin_unlock(pmd_ptl);
anon_vma_unlock_write(vma->anon_vma);
- result = SCAN_FAIL;
goto out_up_write;
}
@@ -1121,8 +1084,8 @@ static void collapse_huge_page(struct mm_struct *mm,
*/
anon_vma_unlock_write(vma->anon_vma);
- __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
- &compound_pagelist);
+ __collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl,
+ &compound_pagelist);
pte_unmap(pte);
/*
* spin_lock() below is not the equivalent of smp_wmb(), but
@@ -1130,42 +1093,43 @@ static void collapse_huge_page(struct mm_struct *mm,
* avoid the copy_huge_page writes to become visible after
* the set_pmd_at() write.
*/
- __SetPageUptodate(new_page);
+ __SetPageUptodate(hpage);
pgtable = pmd_pgtable(_pmd);
- _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
+ _pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
spin_lock(pmd_ptl);
BUG_ON(!pmd_none(*pmd));
- page_add_new_anon_rmap(new_page, vma, address);
- lru_cache_add_inactive_or_unevictable(new_page, vma);
+ page_add_new_anon_rmap(hpage, vma, address);
+ lru_cache_add_inactive_or_unevictable(hpage, vma);
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache_pmd(vma, address, pmd);
spin_unlock(pmd_ptl);
- *hpage = NULL;
+ hpage = NULL;
- khugepaged_pages_collapsed++;
result = SCAN_SUCCEED;
out_up_write:
mmap_write_unlock(mm);
out_nolock:
- if (!IS_ERR_OR_NULL(*hpage))
- mem_cgroup_uncharge(page_folio(*hpage));
- trace_mm_collapse_huge_page(mm, isolated, result);
- return;
+ if (hpage) {
+ mem_cgroup_uncharge(page_folio(hpage));
+ put_page(hpage);
+ }
+ trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
+ return result;
}
-static int khugepaged_scan_pmd(struct mm_struct *mm,
- struct vm_area_struct *vma,
- unsigned long address,
- struct page **hpage)
+static int hpage_collapse_scan_pmd(struct mm_struct *mm,
+ struct vm_area_struct *vma,
+ unsigned long address, bool *mmap_locked,
+ struct collapse_control *cc)
{
pmd_t *pmd;
pte_t *pte, *_pte;
- int ret = 0, result = 0, referenced = 0;
+ int result = SCAN_FAIL, referenced = 0;
int none_or_zero = 0, shared = 0;
struct page *page = NULL;
unsigned long _address;
@@ -1175,19 +1139,19 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
- pmd = mm_find_pmd(mm, address);
- if (!pmd) {
- result = SCAN_PMD_NULL;
+ result = find_pmd_or_thp_or_none(mm, address, &pmd);
+ if (result != SCAN_SUCCEED)
goto out;
- }
- memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
+ memset(cc->node_load, 0, sizeof(cc->node_load));
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
_pte++, _address += PAGE_SIZE) {
pte_t pteval = *_pte;
if (is_swap_pte(pteval)) {
- if (++unmapped <= khugepaged_max_ptes_swap) {
+ ++unmapped;
+ if (!cc->is_khugepaged ||
+ unmapped <= khugepaged_max_ptes_swap) {
/*
* Always be strict with uffd-wp
* enabled swap entries. Please see
@@ -1205,8 +1169,10 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
}
}
if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+ ++none_or_zero;
if (!userfaultfd_armed(vma) &&
- ++none_or_zero <= khugepaged_max_ptes_none) {
+ (!cc->is_khugepaged ||
+ none_or_zero <= khugepaged_max_ptes_none)) {
continue;
} else {
result = SCAN_EXCEED_NONE_PTE;
@@ -1236,27 +1202,30 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
goto out_unmap;
}
- if (page_mapcount(page) > 1 &&
- ++shared > khugepaged_max_ptes_shared) {
- result = SCAN_EXCEED_SHARED_PTE;
- count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
- goto out_unmap;
+ if (page_mapcount(page) > 1) {
+ ++shared;
+ if (cc->is_khugepaged &&
+ shared > khugepaged_max_ptes_shared) {
+ result = SCAN_EXCEED_SHARED_PTE;
+ count_vm_event(THP_SCAN_EXCEED_SHARED_PTE);
+ goto out_unmap;
+ }
}
page = compound_head(page);
/*
* Record which node the original page is from and save this
- * information to khugepaged_node_load[].
+ * information to cc->node_load[].
* Khugepaged will allocate hugepage from the node has the max
* hit record.
*/
node = page_to_nid(page);
- if (khugepaged_scan_abort(node)) {
+ if (hpage_collapse_scan_abort(node, cc)) {
result = SCAN_SCAN_ABORT;
goto out_unmap;
}
- khugepaged_node_load[node]++;
+ cc->node_load[node]++;
if (!PageLRU(page)) {
result = SCAN_PAGE_LRU;
goto out_unmap;
@@ -1291,43 +1260,51 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
result = SCAN_PAGE_COUNT;
goto out_unmap;
}
- if (pte_young(pteval) ||
- page_is_young(page) || PageReferenced(page) ||
- mmu_notifier_test_young(vma->vm_mm, address))
+
+ /*
+ * If collapse was initiated by khugepaged, check that there is
+ * enough young pte to justify collapsing the page
+ */
+ if (cc->is_khugepaged &&
+ (pte_young(pteval) || page_is_young(page) ||
+ PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
+ address)))
referenced++;
}
if (!writable) {
result = SCAN_PAGE_RO;
- } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
+ } else if (cc->is_khugepaged &&
+ (!referenced ||
+ (unmapped && referenced < HPAGE_PMD_NR / 2))) {
result = SCAN_LACK_REFERENCED_PAGE;
} else {
result = SCAN_SUCCEED;
- ret = 1;
}
out_unmap:
pte_unmap_unlock(pte, ptl);
- if (ret) {
- node = khugepaged_find_target_node();
+ if (result == SCAN_SUCCEED) {
+ result = collapse_huge_page(mm, address, referenced,
+ unmapped, cc);
/* collapse_huge_page will return with the mmap_lock released */
- collapse_huge_page(mm, address, hpage, node,
- referenced, unmapped);
+ *mmap_locked = false;
}
out:
trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
none_or_zero, result, unmapped);
- return ret;
+ return result;
}
-static void collect_mm_slot(struct mm_slot *mm_slot)
+static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
{
- struct mm_struct *mm = mm_slot->mm;
+ struct mm_slot *slot = &mm_slot->slot;
+ struct mm_struct *mm = slot->mm;
lockdep_assert_held(&khugepaged_mm_lock);
- if (khugepaged_test_exit(mm)) {
+ if (hpage_collapse_test_exit(mm)) {
/* free mm_slot */
- hash_del(&mm_slot->hash);
- list_del(&mm_slot->mm_node);
+ hash_del(&slot->hash);
+ list_del(&slot->mm_node);
/*
* Not strictly needed because the mm exited already.
@@ -1336,7 +1313,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
*/
/* khugepaged_mm_lock actually not necessary for the below */
- free_mm_slot(mm_slot);
+ mm_slot_free(mm_slot_cache, mm_slot);
mmdrop(mm);
}
}
@@ -1345,19 +1322,66 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
/*
* Notify khugepaged that given addr of the mm is pte-mapped THP. Then
* khugepaged should try to collapse the page table.
+ *
+ * Note that the following race exists:
+ * (1) khugepaged calls khugepaged_collapse_pte_mapped_thps() for mm_struct A,
+ * emptying the A's ->pte_mapped_thp[] array.
+ * (2) MADV_COLLAPSE collapses some file extent with target mm_struct B, and
+ * retract_page_tables() finds a VMA in mm_struct A mapping the same extent
+ * (at virtual address X) and adds an entry (for X) into mm_struct A's
+ * ->pte_mapped_thp[] array.
+ * (3) khugepaged calls hpage_collapse_scan_file() for mm_struct A at X,
+ * sees a pte-mapped THP (SCAN_PTE_MAPPED_HUGEPAGE) and adds an entry
+ * (for X) into mm_struct A's ->pte_mapped_thp[] array.
+ * Thus, it's possible the same address is added multiple times for the same
+ * mm_struct. Should this happen, we'll simply attempt
+ * collapse_pte_mapped_thp() multiple times for the same address, under the same
+ * exclusive mmap_lock, and assuming the first call is successful, subsequent
+ * attempts will return quickly (without grabbing any additional locks) when
+ * a huge pmd is found in find_pmd_or_thp_or_none(). Since this is a cheap
+ * check, and since this is a rare occurrence, the cost of preventing this
+ * "multiple-add" is thought to be more expensive than just handling it, should
+ * it occur.
*/
-static void khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
+static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
unsigned long addr)
{
- struct mm_slot *mm_slot;
+ struct khugepaged_mm_slot *mm_slot;
+ struct mm_slot *slot;
+ bool ret = false;
VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
spin_lock(&khugepaged_mm_lock);
- mm_slot = get_mm_slot(mm);
- if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
+ slot = mm_slot_lookup(mm_slots_hash, mm);
+ mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
+ if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP)) {
mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
+ ret = true;
+ }
spin_unlock(&khugepaged_mm_lock);
+ return ret;
+}
+
+/* hpage must be locked, and mmap_lock must be held in write */
+static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t *pmdp, struct page *hpage)
+{
+ struct vm_fault vmf = {
+ .vma = vma,
+ .address = addr,
+ .flags = 0,
+ .pmd = pmdp,
+ };
+
+ VM_BUG_ON(!PageTransHuge(hpage));
+ mmap_assert_write_locked(vma->vm_mm);
+
+ if (do_set_pmd(&vmf, hpage))
+ return SCAN_FAIL;
+
+ get_page(hpage);
+ return SCAN_SUCCEED;
}
static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
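
Much of this refactoring exists so that the MADV_COLLAPSE path can share these helpers with khugepaged (see the race note above). As a usage illustration only -- the MADV_COLLAPSE value below is the asm-generic one introduced elsewhere in this series, and error handling is kept minimal -- a userspace caller might look like:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* asm-generic value added by this series */
#endif

int main(void)
{
	size_t huge = 2UL << 20;	/* PMD size on x86-64 */
	size_t len = 2 * huge;		/* over-allocate so we can align */
	char *p, *aligned;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* MADV_COLLAPSE acts on PMD-aligned, PMD-sized regions. */
	aligned = (char *)(((uintptr_t)p + huge - 1) & ~(huge - 1));

	/* Fault the range in with base pages first. */
	memset(aligned, 0, huge);

	/* Ask the kernel to collapse the range into a huge page. */
	if (madvise(aligned, huge, MADV_COLLAPSE))
		perror("madvise(MADV_COLLAPSE)");

	munmap(p, len);
	return 0;
}
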
@@ -1381,52 +1405,80 @@ static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *v
*
* @mm: process address space where collapse happens
* @addr: THP collapse address
+ * @install_pmd: If a huge PMD should be installed
*
* This function checks whether all the PTEs in the PMD are pointing to the
* right THP. If so, retract the page table so the THP can refault in with
- * as pmd-mapped.
+ * as pmd-mapped. Possibly install a huge PMD mapping the THP.
*/
-void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
+int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
+ bool install_pmd)
{
unsigned long haddr = addr & HPAGE_PMD_MASK;
- struct vm_area_struct *vma = find_vma(mm, haddr);
+ struct vm_area_struct *vma = vma_lookup(mm, haddr);
struct page *hpage;
pte_t *start_pte, *pte;
pmd_t *pmd;
spinlock_t *ptl;
- int count = 0;
+ int count = 0, result = SCAN_FAIL;
int i;
+ mmap_assert_write_locked(mm);
+
+ /* Fast check before locking page if already PMD-mapped */
+ result = find_pmd_or_thp_or_none(mm, haddr, &pmd);
+ if (result == SCAN_PMD_MAPPED)
+ return result;
+
if (!vma || !vma->vm_file ||
!range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE))
- return;
+ return SCAN_VMA_CHECK;
/*
- * This vm_flags may not have VM_HUGEPAGE if the page was not
- * collapsed by this mm. But we can still collapse if the page is
- * the valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
- * will not fail the vma for missing VM_HUGEPAGE
+ * If we are here, we've succeeded in replacing all the native pages
+ * in the page cache with a single hugepage. If a mm were to fault-in
+ * this memory (mapped by a suitably aligned VMA), we'd get the hugepage
+ * and map it by a PMD, regardless of sysfs THP settings. As such, let's
+ * analogously elide sysfs THP settings here.
*/
- if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE, false, false))
- return;
+ if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
+ return SCAN_VMA_CHECK;
/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
if (userfaultfd_wp(vma))
- return;
+ return SCAN_PTE_UFFD_WP;
hpage = find_lock_page(vma->vm_file->f_mapping,
linear_page_index(vma, haddr));
if (!hpage)
- return;
+ return SCAN_PAGE_NULL;
+
+ if (!PageHead(hpage)) {
+ result = SCAN_FAIL;
+ goto drop_hpage;
+ }
- if (!PageHead(hpage))
+ if (compound_order(hpage) != HPAGE_PMD_ORDER) {
+ result = SCAN_PAGE_COMPOUND;
goto drop_hpage;
+ }
- pmd = mm_find_pmd(mm, haddr);
- if (!pmd)
+ switch (result) {
+ case SCAN_SUCCEED:
+ break;
+ case SCAN_PMD_NONE:
+ /*
+ * In MADV_COLLAPSE path, possible race with khugepaged where
+ * all pte entries have been removed and pmd cleared. If so,
+ * skip all the pte checks and just update the pmd mapping.
+ */
+ goto maybe_install_pmd;
+ default:
goto drop_hpage;
+ }
start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
+ result = SCAN_FAIL;
/* step 1: check all mapped PTEs are to the right huge page */
for (i = 0, addr = haddr, pte = start_pte;
@@ -1438,8 +1490,10 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
continue;
/* page swapped out, abort */
- if (!pte_present(*pte))
+ if (!pte_present(*pte)) {
+ result = SCAN_PTE_NON_PRESENT;
goto abort;
+ }
page = vm_normal_page(vma, addr, *pte);
if (WARN_ON_ONCE(page && is_zone_device_page(page)))
@@ -1474,21 +1528,29 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
}
- /* step 4: collapse pmd */
+ /* step 4: remove pte entries */
collapse_and_free_pmd(mm, vma, haddr, pmd);
+
+maybe_install_pmd:
+ /* step 5: install pmd entry */
+ result = install_pmd
+ ? set_huge_pmd(vma, haddr, pmd, hpage)
+ : SCAN_SUCCEED;
+
drop_hpage:
unlock_page(hpage);
put_page(hpage);
- return;
+ return result;
abort:
pte_unmap_unlock(start_pte, ptl);
goto drop_hpage;
}
-static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
+static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
{
- struct mm_struct *mm = mm_slot->mm;
+ struct mm_slot *slot = &mm_slot->slot;
+ struct mm_struct *mm = slot->mm;
int i;
if (likely(mm_slot->nr_pte_mapped_thp == 0))
@@ -1497,26 +1559,33 @@ static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
if (!mmap_write_trylock(mm))
return;
- if (unlikely(khugepaged_test_exit(mm)))
+ if (unlikely(hpage_collapse_test_exit(mm)))
goto out;
for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
- collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
+ collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i], false);
out:
mm_slot->nr_pte_mapped_thp = 0;
mmap_write_unlock(mm);
}
-static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
+static int retract_page_tables(struct address_space *mapping, pgoff_t pgoff,
+ struct mm_struct *target_mm,
+ unsigned long target_addr, struct page *hpage,
+ struct collapse_control *cc)
{
struct vm_area_struct *vma;
- struct mm_struct *mm;
- unsigned long addr;
- pmd_t *pmd;
+ int target_result = SCAN_FAIL;
i_mmap_lock_write(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
+ int result = SCAN_FAIL;
+ struct mm_struct *mm = NULL;
+ unsigned long addr = 0;
+ pmd_t *pmd;
+ bool is_target = false;
+
/*
* Check vma->anon_vma to exclude MAP_PRIVATE mappings that
* got written to. These VMAs are likely not worth investing
@@ -1533,25 +1602,34 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* ptl. It has higher chance to recover THP for the VMA, but
* has higher cost too.
*/
- if (vma->anon_vma)
- continue;
+ if (vma->anon_vma) {
+ result = SCAN_PAGE_ANON;
+ goto next;
+ }
addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
- if (addr & ~HPAGE_PMD_MASK)
- continue;
- if (vma->vm_end < addr + HPAGE_PMD_SIZE)
- continue;
+ if (addr & ~HPAGE_PMD_MASK ||
+ vma->vm_end < addr + HPAGE_PMD_SIZE) {
+ result = SCAN_VMA_CHECK;
+ goto next;
+ }
mm = vma->vm_mm;
- pmd = mm_find_pmd(mm, addr);
- if (!pmd)
- continue;
+ is_target = mm == target_mm && addr == target_addr;
+ result = find_pmd_or_thp_or_none(mm, addr, &pmd);
+ if (result != SCAN_SUCCEED)
+ goto next;
/*
* We need exclusive mmap_lock to retract page table.
*
* We use trylock due to lock inversion: we need to acquire
* mmap_lock while holding page lock. Fault path does it in
* reverse order. Trylock is a way to avoid deadlock.
+ *
+ * Also, it's not MADV_COLLAPSE's job to collapse other
+ * mappings - let khugepaged take care of them later.
*/
- if (mmap_write_trylock(mm)) {
+ result = SCAN_PTE_MAPPED_HUGEPAGE;
+ if ((cc->is_khugepaged || is_target) &&
+ mmap_write_trylock(mm)) {
/*
* When a vma is registered with uffd-wp, we can't
* recycle the pmd pgtable because there can be pte
@@ -1560,25 +1638,48 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* it'll always mapped in small page size for uffd-wp
* registered ranges.
*/
- if (!khugepaged_test_exit(mm) && !userfaultfd_wp(vma))
- collapse_and_free_pmd(mm, vma, addr, pmd);
+ if (hpage_collapse_test_exit(mm)) {
+ result = SCAN_ANY_PROCESS;
+ goto unlock_next;
+ }
+ if (userfaultfd_wp(vma)) {
+ result = SCAN_PTE_UFFD_WP;
+ goto unlock_next;
+ }
+ collapse_and_free_pmd(mm, vma, addr, pmd);
+ if (!cc->is_khugepaged && is_target)
+ result = set_huge_pmd(vma, addr, pmd, hpage);
+ else
+ result = SCAN_SUCCEED;
+
+unlock_next:
mmap_write_unlock(mm);
- } else {
- /* Try again later */
+ goto next;
+ }
+ /*
+ * Calling context will handle target mm/addr. Otherwise, let
+ * khugepaged try again later.
+ */
+ if (!is_target) {
khugepaged_add_pte_mapped_thp(mm, addr);
+ continue;
}
+next:
+ if (is_target)
+ target_result = result;
}
i_mmap_unlock_write(mapping);
+ return target_result;
}
/**
* collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
*
* @mm: process address space where collapse happens
+ * @addr: virtual collapse start address
* @file: file that collapse on
* @start: collapse start address
- * @hpage: new allocated huge page for collapse
- * @node: appointed node the new huge page allocate from
+ * @cc: collapse context and scratchpad
*
* Basic scheme is simple, details are more complex:
* - allocate and lock a new huge page;
@@ -1595,13 +1696,12 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* + restore gaps in the page cache;
* + unlock and free huge page;
*/
-static void collapse_file(struct mm_struct *mm,
- struct file *file, pgoff_t start,
- struct page **hpage, int node)
+static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ struct file *file, pgoff_t start,
+ struct collapse_control *cc)
{
struct address_space *mapping = file->f_mapping;
- gfp_t gfp;
- struct page *new_page;
+ struct page *hpage;
pgoff_t index, end = start + HPAGE_PMD_NR;
LIST_HEAD(pagelist);
XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1612,20 +1712,9 @@ static void collapse_file(struct mm_struct *mm,
VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
- /* Only allocate from the target node */
- gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
-
- new_page = khugepaged_alloc_page(hpage, gfp, node);
- if (!new_page) {
- result = SCAN_ALLOC_HUGE_PAGE_FAIL;
- goto out;
- }
-
- if (unlikely(mem_cgroup_charge(page_folio(new_page), mm, gfp))) {
- result = SCAN_CGROUP_CHARGE_FAIL;
+ result = alloc_charge_hpage(&hpage, mm, cc);
+ if (result != SCAN_SUCCEED)
goto out;
- }
- count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
/*
* Ensure we have slots for all the pages in the range. This is
@@ -1643,14 +1732,14 @@ static void collapse_file(struct mm_struct *mm,
}
} while (1);
- __SetPageLocked(new_page);
+ __SetPageLocked(hpage);
if (is_shmem)
- __SetPageSwapBacked(new_page);
- new_page->index = start;
- new_page->mapping = mapping;
+ __SetPageSwapBacked(hpage);
+ hpage->index = start;
+ hpage->mapping = mapping;
/*
- * At this point the new_page is locked and not up-to-date.
+ * At this point the hpage is locked and not up-to-date.
* It's safe to insert it into the page cache, because nobody would
* be able to map it or use it in another way until we unlock it.
*/
@@ -1678,19 +1767,22 @@ static void collapse_file(struct mm_struct *mm,
result = SCAN_FAIL;
goto xa_locked;
}
- xas_store(&xas, new_page);
+ xas_store(&xas, hpage);
nr_none++;
continue;
}
if (xa_is_value(page) || !PageUptodate(page)) {
+ struct folio *folio;
+
xas_unlock_irq(&xas);
/* swap in or instantiate fallocated page */
- if (shmem_getpage(mapping->host, index, &page,
- SGP_NOALLOC)) {
+ if (shmem_get_folio(mapping->host, index,
+ &folio, SGP_NOALLOC)) {
result = SCAN_FAIL;
goto xa_unlocked;
}
+ page = folio_file_page(folio, index);
} else if (trylock_page(page)) {
get_page(page);
xas_unlock_irq(&xas);
@@ -1757,9 +1849,16 @@ static void collapse_file(struct mm_struct *mm,
/*
* If file was truncated then extended, or hole-punched, before
* we locked the first page, then a THP might be there already.
+ * This will be discovered on the first iteration.
*/
if (PageTransCompound(page)) {
- result = SCAN_PAGE_COMPOUND;
+ struct page *head = compound_head(page);
+
+ result = compound_order(head) == HPAGE_PMD_ORDER &&
+ head->index == start
+ /* Maybe PMD-mapped */
+ ? SCAN_PTE_MAPPED_HUGEPAGE
+ : SCAN_PAGE_COMPOUND;
goto out_unlock;
}
@@ -1820,19 +1919,19 @@ static void collapse_file(struct mm_struct *mm,
list_add_tail(&page->lru, &pagelist);
/* Finally, replace with the new page. */
- xas_store(&xas, new_page);
+ xas_store(&xas, hpage);
continue;
out_unlock:
unlock_page(page);
put_page(page);
goto xa_unlocked;
}
- nr = thp_nr_pages(new_page);
+ nr = thp_nr_pages(hpage);
if (is_shmem)
- __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
+ __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
else {
- __mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
+ __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
filemap_nr_thps_inc(mapping);
/*
* Paired with smp_mb() in do_dentry_open() to ensure
@@ -1843,21 +1942,21 @@ out_unlock:
smp_mb();
if (inode_is_open_for_write(mapping->host)) {
result = SCAN_FAIL;
- __mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
+ __mod_lruvec_page_state(hpage, NR_FILE_THPS, -nr);
filemap_nr_thps_dec(mapping);
goto xa_locked;
}
}
if (nr_none) {
- __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
+ __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
/* nr_none is always 0 for non-shmem. */
- __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
+ __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
}
/* Join all the small entries into a single multi-index entry */
xas_set_order(&xas, start, HPAGE_PMD_ORDER);
- xas_store(&xas, new_page);
+ xas_store(&xas, hpage);
xa_locked:
xas_unlock_irq(&xas);
xa_unlocked:
@@ -1879,11 +1978,11 @@ xa_unlocked:
index = start;
list_for_each_entry_safe(page, tmp, &pagelist, lru) {
while (index < page->index) {
- clear_highpage(new_page + (index % HPAGE_PMD_NR));
+ clear_highpage(hpage + (index % HPAGE_PMD_NR));
index++;
}
- copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
- page);
+ copy_highpage(hpage + (page->index % HPAGE_PMD_NR),
+ page);
list_del(&page->lru);
page->mapping = NULL;
page_ref_unfreeze(page, 1);
@@ -1894,23 +1993,23 @@ xa_unlocked:
index++;
}
while (index < end) {
- clear_highpage(new_page + (index % HPAGE_PMD_NR));
+ clear_highpage(hpage + (index % HPAGE_PMD_NR));
index++;
}
- SetPageUptodate(new_page);
- page_ref_add(new_page, HPAGE_PMD_NR - 1);
+ SetPageUptodate(hpage);
+ page_ref_add(hpage, HPAGE_PMD_NR - 1);
if (is_shmem)
- set_page_dirty(new_page);
- lru_cache_add(new_page);
+ set_page_dirty(hpage);
+ lru_cache_add(hpage);
/*
* Remove pte page tables, so we can re-fault the page as huge.
*/
- retract_page_tables(mapping, start);
- *hpage = NULL;
-
- khugepaged_pages_collapsed++;
+ result = retract_page_tables(mapping, start, mm, addr, hpage,
+ cc);
+ unlock_page(hpage);
+ hpage = NULL;
} else {
struct page *page;
@@ -1949,19 +2048,24 @@ xa_unlocked:
VM_BUG_ON(nr_none);
xas_unlock_irq(&xas);
- new_page->mapping = NULL;
+ hpage->mapping = NULL;
}
- unlock_page(new_page);
+ if (hpage)
+ unlock_page(hpage);
out:
VM_BUG_ON(!list_empty(&pagelist));
- if (!IS_ERR_OR_NULL(*hpage))
- mem_cgroup_uncharge(page_folio(*hpage));
+ if (hpage) {
+ mem_cgroup_uncharge(page_folio(hpage));
+ put_page(hpage);
+ }
/* TODO: tracepoints */
+ return result;
}
-static void khugepaged_scan_file(struct mm_struct *mm,
- struct file *file, pgoff_t start, struct page **hpage)
+static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
+ struct file *file, pgoff_t start,
+ struct collapse_control *cc)
{
struct page *page = NULL;
struct address_space *mapping = file->f_mapping;
@@ -1972,14 +2076,16 @@ static void khugepaged_scan_file(struct mm_struct *mm,
present = 0;
swap = 0;
- memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
+ memset(cc->node_load, 0, sizeof(cc->node_load));
rcu_read_lock();
xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
if (xas_retry(&xas, page))
continue;
if (xa_is_value(page)) {
- if (++swap > khugepaged_max_ptes_swap) {
+ ++swap;
+ if (cc->is_khugepaged &&
+ swap > khugepaged_max_ptes_swap) {
result = SCAN_EXCEED_SWAP_PTE;
count_vm_event(THP_SCAN_EXCEED_SWAP_PTE);
break;
@@ -1988,20 +2094,32 @@ static void khugepaged_scan_file(struct mm_struct *mm,
}
/*
- * XXX: khugepaged should compact smaller compound pages
+ * TODO: khugepaged should compact smaller compound pages
* into a PMD sized page
*/
if (PageTransCompound(page)) {
- result = SCAN_PAGE_COMPOUND;
+ struct page *head = compound_head(page);
+
+ result = compound_order(head) == HPAGE_PMD_ORDER &&
+ head->index == start
+ /* Maybe PMD-mapped */
+ ? SCAN_PTE_MAPPED_HUGEPAGE
+ : SCAN_PAGE_COMPOUND;
+ /*
+ * For SCAN_PTE_MAPPED_HUGEPAGE, further processing
+ * by the caller won't touch the page cache, and so
+ * it's safe to skip LRU and refcount checks before
+ * returning.
+ */
break;
}
node = page_to_nid(page);
- if (khugepaged_scan_abort(node)) {
+ if (hpage_collapse_scan_abort(node, cc)) {
result = SCAN_SCAN_ABORT;
break;
}
- khugepaged_node_load[node]++;
+ cc->node_load[node]++;
if (!PageLRU(page)) {
result = SCAN_PAGE_LRU;
@@ -2030,54 +2148,68 @@ static void khugepaged_scan_file(struct mm_struct *mm,
rcu_read_unlock();
if (result == SCAN_SUCCEED) {
- if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
+ if (cc->is_khugepaged &&
+ present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
result = SCAN_EXCEED_NONE_PTE;
count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
} else {
- node = khugepaged_find_target_node();
- collapse_file(mm, file, start, hpage, node);
+ result = collapse_file(mm, addr, file, start, cc);
}
}
- /* TODO: tracepoints */
+ trace_mm_khugepaged_scan_file(mm, page, file->f_path.dentry->d_iname,
+ present, swap, result);
+ return result;
}
#else
-static void khugepaged_scan_file(struct mm_struct *mm,
- struct file *file, pgoff_t start, struct page **hpage)
+static int hpage_collapse_scan_file(struct mm_struct *mm, unsigned long addr,
+ struct file *file, pgoff_t start,
+ struct collapse_control *cc)
{
BUILD_BUG();
}
-static void khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
+static void khugepaged_collapse_pte_mapped_thps(struct khugepaged_mm_slot *mm_slot)
{
}
+
+static bool khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
+ unsigned long addr)
+{
+ return false;
+}
#endif
-static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
- struct page **hpage)
+static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
+ struct collapse_control *cc)
__releases(&khugepaged_mm_lock)
__acquires(&khugepaged_mm_lock)
{
- struct mm_slot *mm_slot;
+ struct vma_iterator vmi;
+ struct khugepaged_mm_slot *mm_slot;
+ struct mm_slot *slot;
struct mm_struct *mm;
struct vm_area_struct *vma;
int progress = 0;
VM_BUG_ON(!pages);
lockdep_assert_held(&khugepaged_mm_lock);
+ *result = SCAN_FAIL;
- if (khugepaged_scan.mm_slot)
+ if (khugepaged_scan.mm_slot) {
mm_slot = khugepaged_scan.mm_slot;
- else {
- mm_slot = list_entry(khugepaged_scan.mm_head.next,
+ slot = &mm_slot->slot;
+ } else {
+ slot = list_entry(khugepaged_scan.mm_head.next,
struct mm_slot, mm_node);
+ mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
khugepaged_scan.address = 0;
khugepaged_scan.mm_slot = mm_slot;
}
spin_unlock(&khugepaged_mm_lock);
khugepaged_collapse_pte_mapped_thps(mm_slot);
- mm = mm_slot->mm;
+ mm = slot->mm;
/*
* Don't wait for semaphore (to avoid long wait times). Just move to
* the next mm on the list.
@@ -2085,19 +2217,21 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
vma = NULL;
if (unlikely(!mmap_read_trylock(mm)))
goto breakouterloop_mmap_lock;
- if (likely(!khugepaged_test_exit(mm)))
- vma = find_vma(mm, khugepaged_scan.address);
progress++;
- for (; vma; vma = vma->vm_next) {
+ if (unlikely(hpage_collapse_test_exit(mm)))
+ goto breakouterloop;
+
+ vma_iter_init(&vmi, mm, khugepaged_scan.address);
+ for_each_vma(vmi, vma) {
unsigned long hstart, hend;
cond_resched();
- if (unlikely(khugepaged_test_exit(mm))) {
+ if (unlikely(hpage_collapse_test_exit(mm))) {
progress++;
break;
}
- if (!hugepage_vma_check(vma, vma->vm_flags, false, false)) {
+ if (!hugepage_vma_check(vma, vma->vm_flags, false, false, true)) {
skip:
progress++;
continue;
@@ -2111,9 +2245,10 @@ skip:
VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
while (khugepaged_scan.address < hend) {
- int ret;
+ bool mmap_locked = true;
+
cond_resched();
- if (unlikely(khugepaged_test_exit(mm)))
+ if (unlikely(hpage_collapse_test_exit(mm)))
goto breakouterloop;
VM_BUG_ON(khugepaged_scan.address < hstart ||
@@ -2125,19 +2260,48 @@ skip:
khugepaged_scan.address);
mmap_read_unlock(mm);
- ret = 1;
- khugepaged_scan_file(mm, file, pgoff, hpage);
+ *result = hpage_collapse_scan_file(mm,
+ khugepaged_scan.address,
+ file, pgoff, cc);
+ mmap_locked = false;
fput(file);
} else {
- ret = khugepaged_scan_pmd(mm, vma,
- khugepaged_scan.address,
- hpage);
+ *result = hpage_collapse_scan_pmd(mm, vma,
+ khugepaged_scan.address,
+ &mmap_locked,
+ cc);
+ }
+ switch (*result) {
+ case SCAN_PTE_MAPPED_HUGEPAGE: {
+ pmd_t *pmd;
+
+ *result = find_pmd_or_thp_or_none(mm,
+ khugepaged_scan.address,
+ &pmd);
+ if (*result != SCAN_SUCCEED)
+ break;
+ if (!khugepaged_add_pte_mapped_thp(mm,
+ khugepaged_scan.address))
+ break;
+ } fallthrough;
+ case SCAN_SUCCEED:
+ ++khugepaged_pages_collapsed;
+ break;
+ default:
+ break;
}
+
/* move to next address */
khugepaged_scan.address += HPAGE_PMD_SIZE;
progress += HPAGE_PMD_NR;
- if (ret)
- /* we released mmap_lock so break loop */
+ if (!mmap_locked)
+ /*
+ * We released mmap_lock so break loop. Note
+ * that we drop mmap_lock before all hugepage
+ * allocations, so if allocation fails, we are
+ * guaranteed to break here and report the
+ * correct result back to caller.
+ */
goto breakouterloop_mmap_lock;
if (progress >= pages)
goto breakouterloop;
@@ -2153,16 +2317,17 @@ breakouterloop_mmap_lock:
* Release the current mm_slot if this mm is about to die, or
* if we scanned all vmas of this mm.
*/
- if (khugepaged_test_exit(mm) || !vma) {
+ if (hpage_collapse_test_exit(mm) || !vma) {
/*
* Make sure that if mm_users is reaching zero while
* khugepaged runs here, khugepaged_exit will find
* mm_slot not pointing to the exiting mm.
*/
- if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
- khugepaged_scan.mm_slot = list_entry(
- mm_slot->mm_node.next,
- struct mm_slot, mm_node);
+ if (slot->mm_node.next != &khugepaged_scan.mm_head) {
+ slot = list_entry(slot->mm_node.next,
+ struct mm_slot, mm_node);
+ khugepaged_scan.mm_slot =
+ mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
khugepaged_scan.address = 0;
} else {
khugepaged_scan.mm_slot = NULL;
@@ -2187,19 +2352,16 @@ static int khugepaged_wait_event(void)
kthread_should_stop();
}
-static void khugepaged_do_scan(void)
+static void khugepaged_do_scan(struct collapse_control *cc)
{
- struct page *hpage = NULL;
unsigned int progress = 0, pass_through_head = 0;
unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
bool wait = true;
+ int result = SCAN_SUCCEED;
lru_add_drain_all();
- while (progress < pages) {
- if (!khugepaged_prealloc_page(&hpage, &wait))
- break;
-
+ while (true) {
cond_resched();
if (unlikely(kthread_should_stop() || try_to_freeze()))
@@ -2211,14 +2373,25 @@ static void khugepaged_do_scan(void)
if (khugepaged_has_work() &&
pass_through_head < 2)
progress += khugepaged_scan_mm_slot(pages - progress,
- &hpage);
+ &result, cc);
else
progress = pages;
spin_unlock(&khugepaged_mm_lock);
- }
- if (!IS_ERR_OR_NULL(hpage))
- put_page(hpage);
+ if (progress >= pages)
+ break;
+
+ if (result == SCAN_ALLOC_HUGE_PAGE_FAIL) {
+ /*
+ * If we fail to allocate the first time, try to sleep for
+ * a while. If the failure happens again, cancel the scan.
+ */
+ if (!wait)
+ break;
+ wait = false;
+ khugepaged_alloc_sleep();
+ }
+ }
}
static bool khugepaged_should_wakeup(void)
@@ -2249,13 +2422,13 @@ static void khugepaged_wait_work(void)
static int khugepaged(void *none)
{
- struct mm_slot *mm_slot;
+ struct khugepaged_mm_slot *mm_slot;
set_freezable();
set_user_nice(current, MAX_NICE);
while (!kthread_should_stop()) {
- khugepaged_do_scan();
+ khugepaged_do_scan(&khugepaged_collapse_control);
khugepaged_wait_work();
}
@@ -2354,3 +2527,140 @@ void khugepaged_min_free_kbytes_update(void)
set_recommended_min_free_kbytes();
mutex_unlock(&khugepaged_mutex);
}
+
+static int madvise_collapse_errno(enum scan_result r)
+{
+ /*
+ * MADV_COLLAPSE breaks from existing madvise(2) conventions to provide
+ * actionable feedback to caller, so they may take an appropriate
+ * fallback measure depending on the nature of the failure.
+ */
+ switch (r) {
+ case SCAN_ALLOC_HUGE_PAGE_FAIL:
+ return -ENOMEM;
+ case SCAN_CGROUP_CHARGE_FAIL:
+ return -EBUSY;
+ /* Resource temporarily unavailable - trying again might succeed */
+ case SCAN_PAGE_LOCK:
+ case SCAN_PAGE_LRU:
+ case SCAN_DEL_PAGE_LRU:
+ return -EAGAIN;
+ /*
+ * Other: trying again is unlikely to succeed, or the error is intrinsic
+ * to the specified memory range. khugepaged likely won't be able to
+ * collapse it either.
+ */
+ default:
+ return -EINVAL;
+ }
+}
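
To make the errno mapping above concrete, here is a hypothetical userspace sketch (not part of this patch) showing how a caller might act on these values; the MADV_COLLAPSE constant is assumed to match the uapi definition added elsewhere in this series, and the function name is made up.

#include <errno.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* assumption: matches uapi asm-generic/mman-common.h */
#endif

/* Returns 0 on success, 1 if retrying later may help, -1 on hard failure. */
static int try_collapse(void *addr, size_t len)
{
	if (!madvise(addr, len, MADV_COLLAPSE))
		return 0;
	if (errno == EAGAIN)	/* SCAN_PAGE_LOCK/LRU/DEL_PAGE_LRU above */
		return 1;
	return -1;		/* ENOMEM, EBUSY or EINVAL: fall back */
}
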
+
+int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
+ unsigned long start, unsigned long end)
+{
+ struct collapse_control *cc;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long hstart, hend, addr;
+ int thps = 0, last_fail = SCAN_FAIL;
+ bool mmap_locked = true;
+
+ BUG_ON(vma->vm_start > start);
+ BUG_ON(vma->vm_end < end);
+
+ *prev = vma;
+
+ if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
+ return -EINVAL;
+
+ cc = kmalloc(sizeof(*cc), GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+ cc->is_khugepaged = false;
+ cc->last_target_node = NUMA_NO_NODE;
+
+ mmgrab(mm);
+ lru_add_drain_all();
+
+ hstart = (start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+ hend = end & HPAGE_PMD_MASK;
+
+ for (addr = hstart; addr < hend; addr += HPAGE_PMD_SIZE) {
+ int result = SCAN_FAIL;
+
+ if (!mmap_locked) {
+ cond_resched();
+ mmap_read_lock(mm);
+ mmap_locked = true;
+ result = hugepage_vma_revalidate(mm, addr, false, &vma,
+ cc);
+ if (result != SCAN_SUCCEED) {
+ last_fail = result;
+ goto out_nolock;
+ }
+
+ hend = vma->vm_end & HPAGE_PMD_MASK;
+ }
+ mmap_assert_locked(mm);
+ memset(cc->node_load, 0, sizeof(cc->node_load));
+ if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
+ struct file *file = get_file(vma->vm_file);
+ pgoff_t pgoff = linear_page_index(vma, addr);
+
+ mmap_read_unlock(mm);
+ mmap_locked = false;
+ result = hpage_collapse_scan_file(mm, addr, file, pgoff,
+ cc);
+ fput(file);
+ } else {
+ result = hpage_collapse_scan_pmd(mm, vma, addr,
+ &mmap_locked, cc);
+ }
+ if (!mmap_locked)
+ *prev = NULL; /* Tell caller we dropped mmap_lock */
+
+handle_result:
+ switch (result) {
+ case SCAN_SUCCEED:
+ case SCAN_PMD_MAPPED:
+ ++thps;
+ break;
+ case SCAN_PTE_MAPPED_HUGEPAGE:
+ BUG_ON(mmap_locked);
+ BUG_ON(*prev);
+ mmap_write_lock(mm);
+ result = collapse_pte_mapped_thp(mm, addr, true);
+ mmap_write_unlock(mm);
+ goto handle_result;
+ /* Whitelisted set of results where continuing is OK */
+ case SCAN_PMD_NULL:
+ case SCAN_PTE_NON_PRESENT:
+ case SCAN_PTE_UFFD_WP:
+ case SCAN_PAGE_RO:
+ case SCAN_LACK_REFERENCED_PAGE:
+ case SCAN_PAGE_NULL:
+ case SCAN_PAGE_COUNT:
+ case SCAN_PAGE_LOCK:
+ case SCAN_PAGE_COMPOUND:
+ case SCAN_PAGE_LRU:
+ case SCAN_DEL_PAGE_LRU:
+ last_fail = result;
+ break;
+ default:
+ last_fail = result;
+ /* Other error, exit */
+ goto out_maybelock;
+ }
+ }
+
+out_maybelock:
+ /* Caller expects us to hold mmap_lock on return */
+ if (!mmap_locked)
+ mmap_read_lock(mm);
+out_nolock:
+ mmap_assert_locked(mm);
+ mmdrop(mm);
+ kfree(cc);
+
+ return thps == ((hend - hstart) >> HPAGE_PMD_SHIFT) ? 0
+ : madvise_collapse_errno(last_fail);
+}
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 1eddc0132f7f..37af2dc8dac9 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -604,9 +604,8 @@ static int __save_stack_trace(unsigned long *trace)
* memory block and add it to the object_list and object_tree_root (or
* object_phys_tree_root).
*/
-static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
- int min_count, gfp_t gfp,
- bool is_phys)
+static void __create_object(unsigned long ptr, size_t size,
+ int min_count, gfp_t gfp, bool is_phys)
{
unsigned long flags;
struct kmemleak_object *object, *parent;
@@ -618,7 +617,7 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
if (!object) {
pr_warn("Cannot allocate a kmemleak_object structure\n");
kmemleak_disable();
- return NULL;
+ return;
}
INIT_LIST_HEAD(&object->object_list);
@@ -687,7 +686,6 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
*/
dump_object_info(parent);
kmem_cache_free(object_cache, object);
- object = NULL;
goto out;
}
}
@@ -698,21 +696,20 @@ static struct kmemleak_object *__create_object(unsigned long ptr, size_t size,
list_add_tail_rcu(&object->object_list, &object_list);
out:
raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
- return object;
}
/* Create kmemleak object which allocated with virtual address. */
-static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
- int min_count, gfp_t gfp)
+static void create_object(unsigned long ptr, size_t size,
+ int min_count, gfp_t gfp)
{
- return __create_object(ptr, size, min_count, gfp, false);
+ __create_object(ptr, size, min_count, gfp, false);
}
/* Create kmemleak object which allocated with physical address. */
-static struct kmemleak_object *create_object_phys(unsigned long ptr, size_t size,
- int min_count, gfp_t gfp)
+static void create_object_phys(unsigned long ptr, size_t size,
+ int min_count, gfp_t gfp)
{
- return __create_object(ptr, size, min_count, gfp, true);
+ __create_object(ptr, size, min_count, gfp, true);
}
/*
diff --git a/mm/kmsan/Makefile b/mm/kmsan/Makefile
new file mode 100644
index 000000000000..98eab2856626
--- /dev/null
+++ b/mm/kmsan/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for KernelMemorySanitizer (KMSAN).
+#
+#
+obj-y := core.o instrumentation.o init.o hooks.o report.o shadow.o
+
+KMSAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+UBSAN_SANITIZE := n
+
+# Disable instrumentation of KMSAN runtime with other tools.
+CC_FLAGS_KMSAN_RUNTIME := -fno-stack-protector
+CC_FLAGS_KMSAN_RUNTIME += $(call cc-option,-fno-conserve-stack)
+CC_FLAGS_KMSAN_RUNTIME += -DDISABLE_BRANCH_PROFILING
+
+CFLAGS_REMOVE.o = $(CC_FLAGS_FTRACE)
+
+CFLAGS_core.o := $(CC_FLAGS_KMSAN_RUNTIME)
+CFLAGS_hooks.o := $(CC_FLAGS_KMSAN_RUNTIME)
+CFLAGS_init.o := $(CC_FLAGS_KMSAN_RUNTIME)
+CFLAGS_instrumentation.o := $(CC_FLAGS_KMSAN_RUNTIME)
+CFLAGS_report.o := $(CC_FLAGS_KMSAN_RUNTIME)
+CFLAGS_shadow.o := $(CC_FLAGS_KMSAN_RUNTIME)
+
+obj-$(CONFIG_KMSAN_KUNIT_TEST) += kmsan_test.o
+KMSAN_SANITIZE_kmsan_test.o := y
+CFLAGS_kmsan_test.o += $(call cc-disable-warning, uninitialized)
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
new file mode 100644
index 000000000000..112dce135c7f
--- /dev/null
+++ b/mm/kmsan/core.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KMSAN runtime library.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kmsan_types.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/mmzone.h>
+#include <linux/percpu-defs.h>
+#include <linux/preempt.h>
+#include <linux/slab.h>
+#include <linux/stackdepot.h>
+#include <linux/stacktrace.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include "../slab.h"
+#include "kmsan.h"
+
+bool kmsan_enabled __read_mostly;
+
+/*
+ * Per-CPU KMSAN context to be used in interrupts, where current->kmsan is
+ * unavaliable.
+ */
+DEFINE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);
+
+void kmsan_internal_task_create(struct task_struct *task)
+{
+ struct kmsan_ctx *ctx = &task->kmsan_ctx;
+ struct thread_info *info = current_thread_info();
+
+ __memset(ctx, 0, sizeof(*ctx));
+ ctx->allow_reporting = true;
+ kmsan_internal_unpoison_memory(info, sizeof(*info), false);
+}
+
+void kmsan_internal_poison_memory(void *address, size_t size, gfp_t flags,
+ unsigned int poison_flags)
+{
+ u32 extra_bits =
+ kmsan_extra_bits(/*depth*/ 0, poison_flags & KMSAN_POISON_FREE);
+ bool checked = poison_flags & KMSAN_POISON_CHECK;
+ depot_stack_handle_t handle;
+
+ handle = kmsan_save_stack_with_flags(flags, extra_bits);
+ kmsan_internal_set_shadow_origin(address, size, -1, handle, checked);
+}
+
+void kmsan_internal_unpoison_memory(void *address, size_t size, bool checked)
+{
+ kmsan_internal_set_shadow_origin(address, size, 0, 0, checked);
+}
+
+depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
+ unsigned int extra)
+{
+ unsigned long entries[KMSAN_STACK_DEPTH];
+ unsigned int nr_entries;
+
+ nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0);
+
+ /* Don't sleep (see might_sleep_if() in __alloc_pages_nodemask()). */
+ flags &= ~__GFP_DIRECT_RECLAIM;
+
+ return __stack_depot_save(entries, nr_entries, extra, flags, true);
+}
+
+/* Copy the metadata following the memmove() behavior. */
+void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n)
+{
+ depot_stack_handle_t old_origin = 0, new_origin = 0;
+ int src_slots, dst_slots, i, iter, step, skip_bits;
+ depot_stack_handle_t *origin_src, *origin_dst;
+ void *shadow_src, *shadow_dst;
+ u32 *align_shadow_src, shadow;
+ bool backwards;
+
+ shadow_dst = kmsan_get_metadata(dst, KMSAN_META_SHADOW);
+ if (!shadow_dst)
+ return;
+ KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(dst, n));
+
+ shadow_src = kmsan_get_metadata(src, KMSAN_META_SHADOW);
+ if (!shadow_src) {
+ /*
+ * @src is untracked: zero out destination shadow, ignore the
+ * origins, we're done.
+ */
+ __memset(shadow_dst, 0, n);
+ return;
+ }
+ KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(src, n));
+
+ __memmove(shadow_dst, shadow_src, n);
+
+ origin_dst = kmsan_get_metadata(dst, KMSAN_META_ORIGIN);
+ origin_src = kmsan_get_metadata(src, KMSAN_META_ORIGIN);
+ KMSAN_WARN_ON(!origin_dst || !origin_src);
+ src_slots = (ALIGN((u64)src + n, KMSAN_ORIGIN_SIZE) -
+ ALIGN_DOWN((u64)src, KMSAN_ORIGIN_SIZE)) /
+ KMSAN_ORIGIN_SIZE;
+ dst_slots = (ALIGN((u64)dst + n, KMSAN_ORIGIN_SIZE) -
+ ALIGN_DOWN((u64)dst, KMSAN_ORIGIN_SIZE)) /
+ KMSAN_ORIGIN_SIZE;
+ KMSAN_WARN_ON((src_slots < 1) || (dst_slots < 1));
+ KMSAN_WARN_ON((src_slots - dst_slots > 1) ||
+ (dst_slots - src_slots < -1));
+
+ backwards = dst > src;
+ i = backwards ? min(src_slots, dst_slots) - 1 : 0;
+ iter = backwards ? -1 : 1;
+
+ align_shadow_src =
+ (u32 *)ALIGN_DOWN((u64)shadow_src, KMSAN_ORIGIN_SIZE);
+ for (step = 0; step < min(src_slots, dst_slots); step++, i += iter) {
+ KMSAN_WARN_ON(i < 0);
+ shadow = align_shadow_src[i];
+ if (i == 0) {
+ /*
+ * If @src isn't aligned on KMSAN_ORIGIN_SIZE, don't
+ * look at the first @src % KMSAN_ORIGIN_SIZE bytes
+ * of the first shadow slot.
+ */
+ skip_bits = ((u64)src % KMSAN_ORIGIN_SIZE) * 8;
+ shadow = (shadow >> skip_bits) << skip_bits;
+ }
+ if (i == src_slots - 1) {
+ /*
+ * If @src + n isn't aligned on
+ * KMSAN_ORIGIN_SIZE, don't look at the last
+ * (@src + n) % KMSAN_ORIGIN_SIZE bytes of the
+ * last shadow slot.
+ */
+ skip_bits = (((u64)src + n) % KMSAN_ORIGIN_SIZE) * 8;
+ shadow = (shadow << skip_bits) >> skip_bits;
+ }
+ /*
+ * Overwrite the origin only if the corresponding
+ * shadow is nonempty.
+ */
+ if (origin_src[i] && (origin_src[i] != old_origin) && shadow) {
+ old_origin = origin_src[i];
+ new_origin = kmsan_internal_chain_origin(old_origin);
+ /*
+ * kmsan_internal_chain_origin() may return
+ * NULL, but we don't want to lose the previous
+ * origin value.
+ */
+ if (!new_origin)
+ new_origin = old_origin;
+ }
+ if (shadow)
+ origin_dst[i] = new_origin;
+ else
+ origin_dst[i] = 0;
+ }
+ /*
+ * If dst_slots is greater than src_slots (i.e.
+ * dst_slots == src_slots + 1), there is an extra origin slot at the
+ * beginning or end of the destination buffer, for which we take the
+ * origin from the previous slot.
+ * This is only done if the part of the source shadow corresponding to
+ * that slot is non-zero.
+ *
+ * E.g. if we copy 8 aligned bytes that are marked as uninitialized
+ * and have origins o111 and o222, to an unaligned buffer with offset 1,
+ * these two origins are copied to three origin slots, so one of them
+ * needs to be duplicated, depending on the copy direction (@backwards)
+ *
+ * src shadow: |uuuu|uuuu|....|
+ * src origin: |o111|o222|....|
+ *
+ * backwards = 0:
+ * dst shadow: |.uuu|uuuu|u...|
+ * dst origin: |....|o111|o222| - fill the empty slot with o111
+ * backwards = 1:
+ * dst shadow: |.uuu|uuuu|u...|
+ * dst origin: |o111|o222|....| - fill the empty slot with o222
+ */
+ if (src_slots < dst_slots) {
+ if (backwards) {
+ shadow = align_shadow_src[src_slots - 1];
+ skip_bits = (((u64)dst + n) % KMSAN_ORIGIN_SIZE) * 8;
+ shadow = (shadow << skip_bits) >> skip_bits;
+ if (shadow)
+ /* src_slots > 0, therefore dst_slots is at least 2 */
+ origin_dst[dst_slots - 1] =
+ origin_dst[dst_slots - 2];
+ } else {
+ shadow = align_shadow_src[0];
+ skip_bits = ((u64)dst % KMSAN_ORIGIN_SIZE) * 8;
+ shadow = (shadow >> skip_bits) << skip_bits;
+ if (shadow)
+ origin_dst[0] = origin_dst[1];
+ }
+ }
+}
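
As a sanity check on the slot arithmetic above, the following standalone sketch (an illustration only, assuming KMSAN_ORIGIN_SIZE == 4 as defined in kmsan.h) reproduces the example from the comment: copying 8 bytes to a destination offset by 1 yields two source origin slots but three destination slots, so one origin must be duplicated.

#include <stdio.h>

#define KMSAN_ORIGIN_SIZE 4UL
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

int main(void)
{
	unsigned long src = 0x1000, dst = 0x2001, n = 8;
	unsigned long src_slots = (ALIGN(src + n, KMSAN_ORIGIN_SIZE) -
				   ALIGN_DOWN(src, KMSAN_ORIGIN_SIZE)) / KMSAN_ORIGIN_SIZE;
	unsigned long dst_slots = (ALIGN(dst + n, KMSAN_ORIGIN_SIZE) -
				   ALIGN_DOWN(dst, KMSAN_ORIGIN_SIZE)) / KMSAN_ORIGIN_SIZE;

	/* Prints "src_slots=2 dst_slots=3": the extra slot takes a duplicated origin. */
	printf("src_slots=%lu dst_slots=%lu\n", src_slots, dst_slots);
	return 0;
}
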
+
+depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
+{
+ unsigned long entries[3];
+ u32 extra_bits;
+ int depth;
+ bool uaf;
+
+ if (!id)
+ return id;
+ /*
+ * Make sure we have enough spare bits in @id to hold the UAF bit and
+ * the chain depth.
+ */
+ BUILD_BUG_ON(
+ (1 << STACK_DEPOT_EXTRA_BITS) <= (KMSAN_MAX_ORIGIN_DEPTH << 1));
+
+ extra_bits = stack_depot_get_extra_bits(id);
+ depth = kmsan_depth_from_eb(extra_bits);
+ uaf = kmsan_uaf_from_eb(extra_bits);
+
+ /*
+ * Stop chaining origins once the depth reaches KMSAN_MAX_ORIGIN_DEPTH.
+ * This mostly happens when structures with uninitialized padding are
+ * copied around many times. Origin chains for such structures are
+ * usually periodic, and it does not make sense to fully store them.
+ */
+ if (depth == KMSAN_MAX_ORIGIN_DEPTH)
+ return id;
+
+ depth++;
+ extra_bits = kmsan_extra_bits(depth, uaf);
+
+ entries[0] = KMSAN_CHAIN_MAGIC_ORIGIN;
+ entries[1] = kmsan_save_stack_with_flags(GFP_ATOMIC, 0);
+ entries[2] = id;
+ /*
+ * @entries is a local var in non-instrumented code, so KMSAN does not
+ * know it is initialized. Explicitly unpoison it to avoid false
+ * positives when __stack_depot_save() passes it to instrumented code.
+ */
+ kmsan_internal_unpoison_memory(entries, sizeof(entries), false);
+ return __stack_depot_save(entries, ARRAY_SIZE(entries), extra_bits,
+ GFP_ATOMIC, true);
+}
+
+void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
+ u32 origin, bool checked)
+{
+ u64 address = (u64)addr;
+ void *shadow_start;
+ u32 *origin_start;
+ size_t pad = 0;
+
+ KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
+ shadow_start = kmsan_get_metadata(addr, KMSAN_META_SHADOW);
+ if (!shadow_start) {
+ /*
+ * kmsan_metadata_is_contiguous() is true, so either all shadow
+ * and origin pages are NULL, or all are non-NULL.
+ */
+ if (checked) {
+ pr_err("%s: not memsetting %ld bytes starting at %px, because the shadow is NULL\n",
+ __func__, size, addr);
+ KMSAN_WARN_ON(true);
+ }
+ return;
+ }
+ __memset(shadow_start, b, size);
+
+ if (!IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
+ pad = address % KMSAN_ORIGIN_SIZE;
+ address -= pad;
+ size += pad;
+ }
+ size = ALIGN(size, KMSAN_ORIGIN_SIZE);
+ origin_start =
+ (u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);
+
+ for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++)
+ origin_start[i] = origin;
+}
+
+struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
+{
+ struct page *page;
+
+ if (!kmsan_internal_is_vmalloc_addr(vaddr) &&
+ !kmsan_internal_is_module_addr(vaddr))
+ return NULL;
+ page = vmalloc_to_page(vaddr);
+ if (pfn_valid(page_to_pfn(page)))
+ return page;
+ else
+ return NULL;
+}
+
+void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
+ int reason)
+{
+ depot_stack_handle_t cur_origin = 0, new_origin = 0;
+ unsigned long addr64 = (unsigned long)addr;
+ depot_stack_handle_t *origin = NULL;
+ unsigned char *shadow = NULL;
+ int cur_off_start = -1;
+ int chunk_size;
+ size_t pos = 0;
+
+ if (!size)
+ return;
+ KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
+ while (pos < size) {
+ chunk_size = min(size - pos,
+ PAGE_SIZE - ((addr64 + pos) % PAGE_SIZE));
+ shadow = kmsan_get_metadata((void *)(addr64 + pos),
+ KMSAN_META_SHADOW);
+ if (!shadow) {
+ /*
+ * This page is untracked. If there were uninitialized
+ * bytes before, report them.
+ */
+ if (cur_origin) {
+ kmsan_enter_runtime();
+ kmsan_report(cur_origin, addr, size,
+ cur_off_start, pos - 1, user_addr,
+ reason);
+ kmsan_leave_runtime();
+ }
+ cur_origin = 0;
+ cur_off_start = -1;
+ pos += chunk_size;
+ continue;
+ }
+ for (int i = 0; i < chunk_size; i++) {
+ if (!shadow[i]) {
+ /*
+ * This byte is unpoisoned. If there were
+ * poisoned bytes before, report them.
+ */
+ if (cur_origin) {
+ kmsan_enter_runtime();
+ kmsan_report(cur_origin, addr, size,
+ cur_off_start, pos + i - 1,
+ user_addr, reason);
+ kmsan_leave_runtime();
+ }
+ cur_origin = 0;
+ cur_off_start = -1;
+ continue;
+ }
+ origin = kmsan_get_metadata((void *)(addr64 + pos + i),
+ KMSAN_META_ORIGIN);
+ KMSAN_WARN_ON(!origin);
+ new_origin = *origin;
+ /*
+ * Encountered new origin - report the previous
+ * uninitialized range.
+ */
+ if (cur_origin != new_origin) {
+ if (cur_origin) {
+ kmsan_enter_runtime();
+ kmsan_report(cur_origin, addr, size,
+ cur_off_start, pos + i - 1,
+ user_addr, reason);
+ kmsan_leave_runtime();
+ }
+ cur_origin = new_origin;
+ cur_off_start = pos + i;
+ }
+ }
+ pos += chunk_size;
+ }
+ KMSAN_WARN_ON(pos != size);
+ if (cur_origin) {
+ kmsan_enter_runtime();
+ kmsan_report(cur_origin, addr, size, cur_off_start, pos - 1,
+ user_addr, reason);
+ kmsan_leave_runtime();
+ }
+}
+
+bool kmsan_metadata_is_contiguous(void *addr, size_t size)
+{
+ char *cur_shadow = NULL, *next_shadow = NULL, *cur_origin = NULL,
+ *next_origin = NULL;
+ u64 cur_addr = (u64)addr, next_addr = cur_addr + PAGE_SIZE;
+ depot_stack_handle_t *origin_p;
+ bool all_untracked = false;
+
+ if (!size)
+ return true;
+
+ /* The whole range belongs to the same page. */
+ if (ALIGN_DOWN(cur_addr + size - 1, PAGE_SIZE) ==
+ ALIGN_DOWN(cur_addr, PAGE_SIZE))
+ return true;
+
+ cur_shadow = kmsan_get_metadata((void *)cur_addr, /*is_origin*/ false);
+ if (!cur_shadow)
+ all_untracked = true;
+ cur_origin = kmsan_get_metadata((void *)cur_addr, /*is_origin*/ true);
+ if (all_untracked && cur_origin)
+ goto report;
+
+ for (; next_addr < (u64)addr + size;
+ cur_addr = next_addr, cur_shadow = next_shadow,
+ cur_origin = next_origin, next_addr += PAGE_SIZE) {
+ next_shadow = kmsan_get_metadata((void *)next_addr, false);
+ next_origin = kmsan_get_metadata((void *)next_addr, true);
+ if (all_untracked) {
+ if (next_shadow || next_origin)
+ goto report;
+ if (!next_shadow && !next_origin)
+ continue;
+ }
+ if (((u64)cur_shadow == ((u64)next_shadow - PAGE_SIZE)) &&
+ ((u64)cur_origin == ((u64)next_origin - PAGE_SIZE)))
+ continue;
+ goto report;
+ }
+ return true;
+
+report:
+ pr_err("%s: attempting to access two shadow page ranges.\n", __func__);
+ pr_err("Access of size %ld at %px.\n", size, addr);
+ pr_err("Addresses belonging to different ranges: %px and %px\n",
+ (void *)cur_addr, (void *)next_addr);
+ pr_err("page[0].shadow: %px, page[1].shadow: %px\n", cur_shadow,
+ next_shadow);
+ pr_err("page[0].origin: %px, page[1].origin: %px\n", cur_origin,
+ next_origin);
+ origin_p = kmsan_get_metadata(addr, KMSAN_META_ORIGIN);
+ if (origin_p) {
+ pr_err("Origin: %08x\n", *origin_p);
+ kmsan_print_origin(*origin_p);
+ } else {
+ pr_err("Origin: unavailable\n");
+ }
+ return false;
+}
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
new file mode 100644
index 000000000000..35f6b6e6a908
--- /dev/null
+++ b/mm/kmsan/hooks.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KMSAN hooks for kernel subsystems.
+ *
+ * These functions handle creation of KMSAN metadata for memory allocations.
+ *
+ * Copyright (C) 2018-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#include <linux/cacheflush.h>
+#include <linux/dma-direction.h>
+#include <linux/gfp.h>
+#include <linux/kmsan.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/usb.h>
+
+#include "../internal.h"
+#include "../slab.h"
+#include "kmsan.h"
+
+/*
+ * Instrumented functions shouldn't be called under
+ * kmsan_enter_runtime()/kmsan_leave_runtime(), because this will lead to
+ * skipping effects of functions like memset() inside instrumented code.
+ */
+
+void kmsan_task_create(struct task_struct *task)
+{
+ kmsan_enter_runtime();
+ kmsan_internal_task_create(task);
+ kmsan_leave_runtime();
+}
+
+void kmsan_task_exit(struct task_struct *task)
+{
+ struct kmsan_ctx *ctx = &task->kmsan_ctx;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+
+ ctx->allow_reporting = false;
+}
+
+void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags)
+{
+ if (unlikely(object == NULL))
+ return;
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+ /*
+ * There's a ctor or this is an RCU cache - do nothing. The memory
+ * status hasn't changed since last use.
+ */
+ if (s->ctor || (s->flags & SLAB_TYPESAFE_BY_RCU))
+ return;
+
+ kmsan_enter_runtime();
+ if (flags & __GFP_ZERO)
+ kmsan_internal_unpoison_memory(object, s->object_size,
+ KMSAN_POISON_CHECK);
+ else
+ kmsan_internal_poison_memory(object, s->object_size, flags,
+ KMSAN_POISON_CHECK);
+ kmsan_leave_runtime();
+}
+
+void kmsan_slab_free(struct kmem_cache *s, void *object)
+{
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+
+ /* RCU slabs could be legally used after free within the RCU period */
+ if (unlikely(s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)))
+ return;
+ /*
+ * If there's a constructor, freed memory must remain in the same state
+ * until the next allocation. We cannot save its state to detect
+ * use-after-free bugs, instead we just keep it unpoisoned.
+ */
+ if (s->ctor)
+ return;
+ kmsan_enter_runtime();
+ kmsan_internal_poison_memory(object, s->object_size, GFP_KERNEL,
+ KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
+ kmsan_leave_runtime();
+}
+
+void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
+{
+ if (unlikely(ptr == NULL))
+ return;
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+ kmsan_enter_runtime();
+ if (flags & __GFP_ZERO)
+ kmsan_internal_unpoison_memory((void *)ptr, size,
+ /*checked*/ true);
+ else
+ kmsan_internal_poison_memory((void *)ptr, size, flags,
+ KMSAN_POISON_CHECK);
+ kmsan_leave_runtime();
+}
+
+void kmsan_kfree_large(const void *ptr)
+{
+ struct page *page;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+ kmsan_enter_runtime();
+ page = virt_to_head_page((void *)ptr);
+ KMSAN_WARN_ON(ptr != page_address(page));
+ kmsan_internal_poison_memory((void *)ptr,
+ PAGE_SIZE << compound_order(page),
+ GFP_KERNEL,
+ KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
+ kmsan_leave_runtime();
+}
+
+static unsigned long vmalloc_shadow(unsigned long addr)
+{
+ return (unsigned long)kmsan_get_metadata((void *)addr,
+ KMSAN_META_SHADOW);
+}
+
+static unsigned long vmalloc_origin(unsigned long addr)
+{
+ return (unsigned long)kmsan_get_metadata((void *)addr,
+ KMSAN_META_ORIGIN);
+}
+
+void kmsan_vunmap_range_noflush(unsigned long start, unsigned long end)
+{
+ __vunmap_range_noflush(vmalloc_shadow(start), vmalloc_shadow(end));
+ __vunmap_range_noflush(vmalloc_origin(start), vmalloc_origin(end));
+ flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
+ flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
+}
+
+/*
+ * This function creates new shadow/origin pages for the physical pages mapped
+ * into the virtual memory. If those physical pages already had shadow/origin,
+ * those are ignored.
+ */
+void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
+ phys_addr_t phys_addr, pgprot_t prot,
+ unsigned int page_shift)
+{
+ gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO;
+ struct page *shadow, *origin;
+ unsigned long off = 0;
+ int nr;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+
+ nr = (end - start) / PAGE_SIZE;
+ kmsan_enter_runtime();
+ for (int i = 0; i < nr; i++, off += PAGE_SIZE) {
+ shadow = alloc_pages(gfp_mask, 1);
+ origin = alloc_pages(gfp_mask, 1);
+ __vmap_pages_range_noflush(
+ vmalloc_shadow(start + off),
+ vmalloc_shadow(start + off + PAGE_SIZE), prot, &shadow,
+ PAGE_SHIFT);
+ __vmap_pages_range_noflush(
+ vmalloc_origin(start + off),
+ vmalloc_origin(start + off + PAGE_SIZE), prot, &origin,
+ PAGE_SHIFT);
+ }
+ flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
+ flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
+ kmsan_leave_runtime();
+}
+
+void kmsan_iounmap_page_range(unsigned long start, unsigned long end)
+{
+ unsigned long v_shadow, v_origin;
+ struct page *shadow, *origin;
+ int nr;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+
+ nr = (end - start) / PAGE_SIZE;
+ kmsan_enter_runtime();
+ v_shadow = (unsigned long)vmalloc_shadow(start);
+ v_origin = (unsigned long)vmalloc_origin(start);
+ for (int i = 0; i < nr;
+ i++, v_shadow += PAGE_SIZE, v_origin += PAGE_SIZE) {
+ shadow = kmsan_vmalloc_to_page_or_null((void *)v_shadow);
+ origin = kmsan_vmalloc_to_page_or_null((void *)v_origin);
+ __vunmap_range_noflush(v_shadow, vmalloc_shadow(end));
+ __vunmap_range_noflush(v_origin, vmalloc_origin(end));
+ if (shadow)
+ __free_pages(shadow, 1);
+ if (origin)
+ __free_pages(origin, 1);
+ }
+ flush_cache_vmap(vmalloc_shadow(start), vmalloc_shadow(end));
+ flush_cache_vmap(vmalloc_origin(start), vmalloc_origin(end));
+ kmsan_leave_runtime();
+}
+
+void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
+ size_t left)
+{
+ unsigned long ua_flags;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+ /*
+ * At this point we've copied the memory already. It's hard to check it
+ * before copying, as the size of the actually copied buffer is unknown.
+ */
+
+ /* copy_to_user() may copy zero bytes. No need to check. */
+ if (!to_copy)
+ return;
+ /* Or maybe copy_to_user() failed to copy anything. */
+ if (to_copy <= left)
+ return;
+
+ ua_flags = user_access_save();
+ if ((u64)to < TASK_SIZE) {
+ /* This is a user memory access, check it. */
+ kmsan_internal_check_memory((void *)from, to_copy - left, to,
+ REASON_COPY_TO_USER);
+ } else {
+ /*
+ * Otherwise this is a kernel memory access. This happens when a
+ * compat syscall passes an argument allocated on the kernel
+ * stack to a real syscall.
+ * Don't check anything, just copy the shadow of the copied
+ * bytes.
+ */
+ kmsan_internal_memmove_metadata((void *)to, (void *)from,
+ to_copy - left);
+ }
+ user_access_restore(ua_flags);
+}
+EXPORT_SYMBOL(kmsan_copy_to_user);
+
+/* Helper function to check an URB. */
+void kmsan_handle_urb(const struct urb *urb, bool is_out)
+{
+ if (!urb)
+ return;
+ if (is_out)
+ kmsan_internal_check_memory(urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ /*user_addr*/ 0, REASON_SUBMIT_URB);
+ else
+ kmsan_internal_unpoison_memory(urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ /*checked*/ false);
+}
+
+static void kmsan_handle_dma_page(const void *addr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
+ REASON_ANY);
+ kmsan_internal_unpoison_memory((void *)addr, size,
+ /*checked*/ false);
+ break;
+ case DMA_TO_DEVICE:
+ kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
+ REASON_ANY);
+ break;
+ case DMA_FROM_DEVICE:
+ kmsan_internal_unpoison_memory((void *)addr, size,
+ /*checked*/ false);
+ break;
+ case DMA_NONE:
+ break;
+ }
+}
+
+/* Helper function to handle DMA data transfers. */
+void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ u64 page_offset, to_go, addr;
+
+ if (PageHighMem(page))
+ return;
+ addr = (u64)page_address(page) + offset;
+ /*
+ * The kernel may occasionally give us adjacent DMA pages not belonging
+ * to the same allocation. Process them separately to avoid triggering
+ * internal KMSAN checks.
+ */
+ while (size > 0) {
+ page_offset = addr % PAGE_SIZE;
+ to_go = min(PAGE_SIZE - page_offset, (u64)size);
+ kmsan_handle_dma_page((void *)addr, to_go, dir);
+ addr += to_go;
+ size -= to_go;
+ }
+}
+
+void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *item;
+ int i;
+
+ for_each_sg(sg, item, nents, i)
+ kmsan_handle_dma(sg_page(item), item->offset, item->length,
+ dir);
+}
+
+/* Functions from kmsan-checks.h follow. */
+void kmsan_poison_memory(const void *address, size_t size, gfp_t flags)
+{
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+ kmsan_enter_runtime();
+ /* The users may want to poison/unpoison random memory. */
+ kmsan_internal_poison_memory((void *)address, size, flags,
+ KMSAN_POISON_NOCHECK);
+ kmsan_leave_runtime();
+}
+EXPORT_SYMBOL(kmsan_poison_memory);
+
+void kmsan_unpoison_memory(const void *address, size_t size)
+{
+ unsigned long ua_flags;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+
+ ua_flags = user_access_save();
+ kmsan_enter_runtime();
+ /* The users may want to poison/unpoison random memory. */
+ kmsan_internal_unpoison_memory((void *)address, size,
+ KMSAN_POISON_NOCHECK);
+ kmsan_leave_runtime();
+ user_access_restore(ua_flags);
+}
+EXPORT_SYMBOL(kmsan_unpoison_memory);
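
A hypothetical caller of the two hooks above might look like the sketch below (not part of this patch): a driver whose receive buffer is filled by hardware, outside the compiler's view, unpoisons it before instrumented code reads it. The function and buffer names are made up for illustration.

#include <linux/kmsan-checks.h>

static void example_rx_complete(void *buf, size_t len)
{
	/* The device initialized these bytes; tell KMSAN about it. */
	kmsan_unpoison_memory(buf, len);

	/* Optionally, require that the whole buffer is initialized right now. */
	kmsan_check_memory(buf, len);
}
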
+
+/*
+ * Version of kmsan_unpoison_memory() that can be called from within the KMSAN
+ * runtime.
+ *
+ * Non-instrumented IRQ entry functions receive struct pt_regs from assembly
+ * code. Those regs need to be unpoisoned, otherwise using them will result in
+ * false positives.
+ * Using kmsan_unpoison_memory() is not an option in entry code, because the
+ * return value of in_task() is inconsistent - as a result, certain calls to
+ * kmsan_unpoison_memory() are ignored. kmsan_unpoison_entry_regs() ensures that
+ * the registers are unpoisoned even if kmsan_in_runtime() is true in the early
+ * entry code.
+ */
+void kmsan_unpoison_entry_regs(const struct pt_regs *regs)
+{
+ unsigned long ua_flags;
+
+ if (!kmsan_enabled)
+ return;
+
+ ua_flags = user_access_save();
+ kmsan_internal_unpoison_memory((void *)regs, sizeof(*regs),
+ KMSAN_POISON_NOCHECK);
+ user_access_restore(ua_flags);
+}
+
+void kmsan_check_memory(const void *addr, size_t size)
+{
+ if (!kmsan_enabled)
+ return;
+ return kmsan_internal_check_memory((void *)addr, size, /*user_addr*/ 0,
+ REASON_ANY);
+}
+EXPORT_SYMBOL(kmsan_check_memory);
diff --git a/mm/kmsan/init.c b/mm/kmsan/init.c
new file mode 100644
index 000000000000..7fb794242fad
--- /dev/null
+++ b/mm/kmsan/init.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KMSAN initialization routines.
+ *
+ * Copyright (C) 2017-2021 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#include "kmsan.h"
+
+#include <asm/sections.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+
+#include "../internal.h"
+
+#define NUM_FUTURE_RANGES 128
+struct start_end_pair {
+ u64 start, end;
+};
+
+static struct start_end_pair start_end_pairs[NUM_FUTURE_RANGES] __initdata;
+static int future_index __initdata;
+
+/*
+ * Record a range of memory for which the metadata pages will be created once
+ * the page allocator becomes available.
+ */
+static void __init kmsan_record_future_shadow_range(void *start, void *end)
+{
+ u64 nstart = (u64)start, nend = (u64)end, cstart, cend;
+ bool merged = false;
+
+ KMSAN_WARN_ON(future_index == NUM_FUTURE_RANGES);
+ KMSAN_WARN_ON((nstart >= nend) || !nstart || !nend);
+ nstart = ALIGN_DOWN(nstart, PAGE_SIZE);
+ nend = ALIGN(nend, PAGE_SIZE);
+
+ /*
+ * Scan the existing ranges to see if any of them overlaps with
+ * [start, end). In that case, merge the two ranges instead of
+ * creating a new one.
+ * The number of ranges is less than 20, so there is no need to organize
+ * them into a more intelligent data structure.
+ */
+ for (int i = 0; i < future_index; i++) {
+ cstart = start_end_pairs[i].start;
+ cend = start_end_pairs[i].end;
+ if ((cstart < nstart && cend < nstart) ||
+ (cstart > nend && cend > nend))
+ /* ranges are disjoint - do not merge */
+ continue;
+ start_end_pairs[i].start = min(nstart, cstart);
+ start_end_pairs[i].end = max(nend, cend);
+ merged = true;
+ break;
+ }
+ if (merged)
+ return;
+ start_end_pairs[future_index].start = nstart;
+ start_end_pairs[future_index].end = nend;
+ future_index++;
+}
+
+/*
+ * Initialize the shadow for existing mappings during kernel initialization.
+ * These include kernel text/data sections, NODE_DATA and future ranges
+ * registered while creating other data (e.g. percpu).
+ *
+ * Allocations via memblock can be only done before slab is initialized.
+ */
+void __init kmsan_init_shadow(void)
+{
+ const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
+ phys_addr_t p_start, p_end;
+ u64 loop;
+ int nid;
+
+ for_each_reserved_mem_range(loop, &p_start, &p_end)
+ kmsan_record_future_shadow_range(phys_to_virt(p_start),
+ phys_to_virt(p_end));
+ /* Allocate shadow for .data */
+ kmsan_record_future_shadow_range(_sdata, _edata);
+
+ for_each_online_node(nid)
+ kmsan_record_future_shadow_range(
+ NODE_DATA(nid), (char *)NODE_DATA(nid) + nd_size);
+
+ for (int i = 0; i < future_index; i++)
+ kmsan_init_alloc_meta_for_range(
+ (void *)start_end_pairs[i].start,
+ (void *)start_end_pairs[i].end);
+}
+
+struct metadata_page_pair {
+ struct page *shadow, *origin;
+};
+static struct metadata_page_pair held_back[MAX_ORDER] __initdata;
+
+/*
+ * Eager metadata allocation. When the memblock allocator is freeing pages to
+ * pagealloc, we use 2/3 of them as metadata for the remaining 1/3.
+ * We store the pointers to the returned blocks of pages in held_back[] grouped
+ * by their order: when kmsan_memblock_free_pages() is called for the first
+ * time with a certain order, it is reserved as a shadow block, for the second
+ * time - as an origin block. On the third time the incoming block receives its
+ * shadow and origin ranges from the previously saved shadow and origin blocks,
+ * after which held_back[order] can be used again.
+ *
+ * At the very end there may be leftover blocks in held_back[]. They are
+ * collected later by kmsan_memblock_discard().
+ */
+bool kmsan_memblock_free_pages(struct page *page, unsigned int order)
+{
+ struct page *shadow, *origin;
+
+ if (!held_back[order].shadow) {
+ held_back[order].shadow = page;
+ return false;
+ }
+ if (!held_back[order].origin) {
+ held_back[order].origin = page;
+ return false;
+ }
+ shadow = held_back[order].shadow;
+ origin = held_back[order].origin;
+ kmsan_setup_meta(page, shadow, origin, order);
+
+ held_back[order].shadow = NULL;
+ held_back[order].origin = NULL;
+ return true;
+}
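
To illustrate the cycle described above, a hypothetical sequence of three same-order calls (names made up for this sketch) behaves as follows: only the third block actually reaches the page allocator, carrying the first two as its shadow and origin.

/* Illustration only: not part of the patch. */
static void __init example_memblock_cycle(struct page *a, struct page *b,
					  struct page *c, unsigned int order)
{
	bool released;

	released = kmsan_memblock_free_pages(a, order);	/* false: kept as shadow */
	released = kmsan_memblock_free_pages(b, order);	/* false: kept as origin */
	released = kmsan_memblock_free_pages(c, order);	/* true: c backed by a and b */
	(void)released;
}
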
+
+#define MAX_BLOCKS 8
+struct smallstack {
+ struct page *items[MAX_BLOCKS];
+ int index;
+ int order;
+};
+
+static struct smallstack collect = {
+ .index = 0,
+ .order = MAX_ORDER,
+};
+
+static void smallstack_push(struct smallstack *stack, struct page *pages)
+{
+ KMSAN_WARN_ON(stack->index == MAX_BLOCKS);
+ stack->items[stack->index] = pages;
+ stack->index++;
+}
+#undef MAX_BLOCKS
+
+static struct page *smallstack_pop(struct smallstack *stack)
+{
+ struct page *ret;
+
+ KMSAN_WARN_ON(stack->index == 0);
+ stack->index--;
+ ret = stack->items[stack->index];
+ stack->items[stack->index] = NULL;
+ return ret;
+}
+
+static void do_collection(void)
+{
+ struct page *page, *shadow, *origin;
+
+ while (collect.index >= 3) {
+ page = smallstack_pop(&collect);
+ shadow = smallstack_pop(&collect);
+ origin = smallstack_pop(&collect);
+ kmsan_setup_meta(page, shadow, origin, collect.order);
+ __free_pages_core(page, collect.order);
+ }
+}
+
+static void collect_split(void)
+{
+ struct smallstack tmp = {
+ .order = collect.order - 1,
+ .index = 0,
+ };
+ struct page *page;
+
+ if (!collect.order)
+ return;
+ while (collect.index) {
+ page = smallstack_pop(&collect);
+ smallstack_push(&tmp, &page[0]);
+ smallstack_push(&tmp, &page[1 << tmp.order]);
+ }
+ __memcpy(&collect, &tmp, sizeof(tmp));
+}
+
+/*
+ * Memblock is about to go away. Split the page blocks left over in held_back[]
+ * and return 1/3 of that memory to the system.
+ */
+static void kmsan_memblock_discard(void)
+{
+ /*
+ * For each order=N:
+ * - push held_back[N].shadow and .origin to @collect;
+ * - while there are >= 3 elements in @collect, do garbage collection:
+ * - pop 3 ranges from @collect;
+ * - use two of them as shadow and origin for the third one;
+ * - repeat;
+ * - split each remaining element from @collect into 2 ranges of
+ * order=N-1,
+ * - repeat.
+ */
+ collect.order = MAX_ORDER - 1;
+ for (int i = MAX_ORDER - 1; i >= 0; i--) {
+ if (held_back[i].shadow)
+ smallstack_push(&collect, held_back[i].shadow);
+ if (held_back[i].origin)
+ smallstack_push(&collect, held_back[i].origin);
+ held_back[i].shadow = NULL;
+ held_back[i].origin = NULL;
+ do_collection();
+ collect_split();
+ }
+}
+
+void __init kmsan_init_runtime(void)
+{
+ /* Assuming current is init_task */
+ kmsan_internal_task_create(current);
+ kmsan_memblock_discard();
+ pr_info("Starting KernelMemorySanitizer\n");
+ pr_info("ATTENTION: KMSAN is a debugging tool! Do not use it on production machines!\n");
+ kmsan_enabled = true;
+}
diff --git a/mm/kmsan/instrumentation.c b/mm/kmsan/instrumentation.c
new file mode 100644
index 000000000000..280d15413268
--- /dev/null
+++ b/mm/kmsan/instrumentation.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KMSAN compiler API.
+ *
+ * This file implements __msan_XXX hooks that Clang inserts into the code
+ * compiled with -fsanitize=kernel-memory.
+ * See Documentation/dev-tools/kmsan.rst for more information on how KMSAN
+ * instrumentation works.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#include "kmsan.h"
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+
+static inline bool is_bad_asm_addr(void *addr, uintptr_t size, bool is_store)
+{
+ if ((u64)addr < TASK_SIZE)
+ return true;
+ if (!kmsan_get_metadata(addr, KMSAN_META_SHADOW))
+ return true;
+ return false;
+}
+
+static inline struct shadow_origin_ptr
+get_shadow_origin_ptr(void *addr, u64 size, bool store)
+{
+ unsigned long ua_flags = user_access_save();
+ struct shadow_origin_ptr ret;
+
+ ret = kmsan_get_shadow_origin_ptr(addr, size, store);
+ user_access_restore(ua_flags);
+ return ret;
+}
+
+/* Get shadow and origin pointers for a memory load with non-standard size. */
+struct shadow_origin_ptr __msan_metadata_ptr_for_load_n(void *addr,
+ uintptr_t size)
+{
+ return get_shadow_origin_ptr(addr, size, /*store*/ false);
+}
+EXPORT_SYMBOL(__msan_metadata_ptr_for_load_n);
+
+/* Get shadow and origin pointers for a memory store with non-standard size. */
+struct shadow_origin_ptr __msan_metadata_ptr_for_store_n(void *addr,
+ uintptr_t size)
+{
+ return get_shadow_origin_ptr(addr, size, /*store*/ true);
+}
+EXPORT_SYMBOL(__msan_metadata_ptr_for_store_n);
+
+/*
+ * Declare functions that obtain shadow/origin pointers for loads and stores
+ * with fixed size.
+ */
+#define DECLARE_METADATA_PTR_GETTER(size) \
+ struct shadow_origin_ptr __msan_metadata_ptr_for_load_##size( \
+ void *addr) \
+ { \
+ return get_shadow_origin_ptr(addr, size, /*store*/ false); \
+ } \
+ EXPORT_SYMBOL(__msan_metadata_ptr_for_load_##size); \
+ struct shadow_origin_ptr __msan_metadata_ptr_for_store_##size( \
+ void *addr) \
+ { \
+ return get_shadow_origin_ptr(addr, size, /*store*/ true); \
+ } \
+ EXPORT_SYMBOL(__msan_metadata_ptr_for_store_##size)
+
+DECLARE_METADATA_PTR_GETTER(1);
+DECLARE_METADATA_PTR_GETTER(2);
+DECLARE_METADATA_PTR_GETTER(4);
+DECLARE_METADATA_PTR_GETTER(8);
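
Roughly speaking, an instrumented 4-byte load ends up looking like the sketch below. This is an illustration only, hand-written and simplified from what Clang's -fsanitize=kernel-memory pass actually emits: the metadata pointers are fetched, the value and its shadow are read together, and a use of an uninitialized value is reported via __msan_warning().

/* Illustration only: approximation of compiler-generated code. */
static int example_instrumented_load(int *p)
{
	struct shadow_origin_ptr sop = __msan_metadata_ptr_for_load_4(p);
	int val = *p;
	u32 shadow = *(u32 *)sop.shadow;
	u32 origin = *(u32 *)sop.origin;

	if (shadow)			/* @val is about to be used in a branch, */
		__msan_warning(origin);	/* so its shadow must be clean */
	return val > 0 ? 1 : 0;
}
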
+
+/*
+ * Handle a memory store performed by inline assembly. KMSAN conservatively
+ * attempts to unpoison the outputs of asm() directives to prevent false
+ * positives caused by missed stores.
+ */
+void __msan_instrument_asm_store(void *addr, uintptr_t size)
+{
+ unsigned long ua_flags;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+
+ ua_flags = user_access_save();
+ /*
+ * Most of the accesses are below 32 bytes. The two exceptions so far
+ * are clwb() (64 bytes) and FPU state (512 bytes).
+ * It's unlikely that the assembly will touch more than 512 bytes.
+ */
+ if (size > 512) {
+ WARN_ONCE(1, "assembly store size too big: %ld\n", size);
+ size = 8;
+ }
+ if (is_bad_asm_addr(addr, size, /*is_store*/ true)) {
+ user_access_restore(ua_flags);
+ return;
+ }
+ kmsan_enter_runtime();
+ /* Unpoison the memory on a best-effort basis. */
+ kmsan_internal_unpoison_memory(addr, size, /*checked*/ false);
+ kmsan_leave_runtime();
+ user_access_restore(ua_flags);
+}
+EXPORT_SYMBOL(__msan_instrument_asm_store);
+
+/*
+ * KMSAN instrumentation pass replaces LLVM memcpy, memmove and memset
+ * intrinsics with calls to respective __msan_ functions. We use
+ * get_param0_metadata() and set_retval_metadata() to store the shadow/origin
+ * values for the destination argument of these functions and use them for the
+ * functions' return values.
+ */
+static inline void get_param0_metadata(u64 *shadow,
+ depot_stack_handle_t *origin)
+{
+ struct kmsan_ctx *ctx = kmsan_get_context();
+
+ *shadow = *(u64 *)(ctx->cstate.param_tls);
+ *origin = ctx->cstate.param_origin_tls[0];
+}
+
+static inline void set_retval_metadata(u64 shadow, depot_stack_handle_t origin)
+{
+ struct kmsan_ctx *ctx = kmsan_get_context();
+
+ *(u64 *)(ctx->cstate.retval_tls) = shadow;
+ ctx->cstate.retval_origin_tls = origin;
+}
+
+/* Handle llvm.memmove intrinsic. */
+void *__msan_memmove(void *dst, const void *src, uintptr_t n)
+{
+ depot_stack_handle_t origin;
+ void *result;
+ u64 shadow;
+
+ get_param0_metadata(&shadow, &origin);
+ result = __memmove(dst, src, n);
+ if (!n)
+ /* Some people call memmove() with zero length. */
+ return result;
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return result;
+
+ kmsan_enter_runtime();
+ kmsan_internal_memmove_metadata(dst, (void *)src, n);
+ kmsan_leave_runtime();
+
+ set_retval_metadata(shadow, origin);
+ return result;
+}
+EXPORT_SYMBOL(__msan_memmove);
+
+/* Handle llvm.memcpy intrinsic. */
+void *__msan_memcpy(void *dst, const void *src, uintptr_t n)
+{
+ depot_stack_handle_t origin;
+ void *result;
+ u64 shadow;
+
+ get_param0_metadata(&shadow, &origin);
+ result = __memcpy(dst, src, n);
+ if (!n)
+ /* Some people call memcpy() with zero length. */
+ return result;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return result;
+
+ kmsan_enter_runtime();
+ /* Using memmove instead of memcpy doesn't affect correctness. */
+ kmsan_internal_memmove_metadata(dst, (void *)src, n);
+ kmsan_leave_runtime();
+
+ set_retval_metadata(shadow, origin);
+ return result;
+}
+EXPORT_SYMBOL(__msan_memcpy);
+
+/* Handle llvm.memset intrinsic. */
+void *__msan_memset(void *dst, int c, uintptr_t n)
+{
+ depot_stack_handle_t origin;
+ void *result;
+ u64 shadow;
+
+ get_param0_metadata(&shadow, &origin);
+ result = __memset(dst, c, n);
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return result;
+
+ kmsan_enter_runtime();
+ /*
+ * Clang doesn't pass parameter metadata here, so it is impossible to
+ * use the shadow of @c to set up the shadow for @dst.
+ */
+ kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
+ kmsan_leave_runtime();
+
+ set_retval_metadata(shadow, origin);
+ return result;
+}
+EXPORT_SYMBOL(__msan_memset);
+
+/*
+ * Create a new origin from an old one. This is done when storing an
+ * uninitialized value to memory. When reporting an error, KMSAN unrolls and
+ * prints the whole chain of stores that preceded the use of this value.
+ */
+depot_stack_handle_t __msan_chain_origin(depot_stack_handle_t origin)
+{
+ depot_stack_handle_t ret = 0;
+ unsigned long ua_flags;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return ret;
+
+ ua_flags = user_access_save();
+
+ /* Creating new origins may allocate memory. */
+ kmsan_enter_runtime();
+ ret = kmsan_internal_chain_origin(origin);
+ kmsan_leave_runtime();
+ user_access_restore(ua_flags);
+ return ret;
+}
+EXPORT_SYMBOL(__msan_chain_origin);
+
+/* Poison a local variable when entering a function. */
+void __msan_poison_alloca(void *address, uintptr_t size, char *descr)
+{
+ depot_stack_handle_t handle;
+ unsigned long entries[4];
+ unsigned long ua_flags;
+
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+
+ ua_flags = user_access_save();
+ entries[0] = KMSAN_ALLOCA_MAGIC_ORIGIN;
+ entries[1] = (u64)descr;
+ entries[2] = (u64)__builtin_return_address(0);
+ /*
+ * With frame pointers enabled, it is possible to quickly fetch the
+ * second frame of the caller stack without calling the unwinder.
+ * Without them, simply do not bother.
+ */
+ if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER))
+ entries[3] = (u64)__builtin_return_address(1);
+ else
+ entries[3] = 0;
+
+ /* stack_depot_save() may allocate memory. */
+ kmsan_enter_runtime();
+ handle = stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC);
+ kmsan_leave_runtime();
+
+ kmsan_internal_set_shadow_origin(address, size, -1, handle,
+ /*checked*/ true);
+ user_access_restore(ua_flags);
+}
+EXPORT_SYMBOL(__msan_poison_alloca);
+
+/* Unpoison a local variable. */
+void __msan_unpoison_alloca(void *address, uintptr_t size)
+{
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+
+ kmsan_enter_runtime();
+ kmsan_internal_unpoison_memory(address, size, /*checked*/ true);
+ kmsan_leave_runtime();
+}
+EXPORT_SYMBOL(__msan_unpoison_alloca);
+
+/*
+ * Report that an uninitialized value with the given origin was used in a way
+ * that constituted undefined behavior.
+ */
+void __msan_warning(u32 origin)
+{
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+ kmsan_enter_runtime();
+ kmsan_report(origin, /*address*/ 0, /*size*/ 0,
+ /*off_first*/ 0, /*off_last*/ 0, /*user_addr*/ 0,
+ REASON_ANY);
+ kmsan_leave_runtime();
+}
+EXPORT_SYMBOL(__msan_warning);
+
+/*
+ * At the beginning of an instrumented function, obtain the pointer to
+ * `struct kmsan_context_state` holding the metadata for function parameters.
+ */
+struct kmsan_context_state *__msan_get_context_state(void)
+{
+ return &kmsan_get_context()->cstate;
+}
+EXPORT_SYMBOL(__msan_get_context_state);
diff --git a/mm/kmsan/kmsan.h b/mm/kmsan/kmsan.h
new file mode 100644
index 000000000000..7019c46d33a7
--- /dev/null
+++ b/mm/kmsan/kmsan.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Functions used by the KMSAN runtime.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#ifndef __MM_KMSAN_KMSAN_H
+#define __MM_KMSAN_KMSAN_H
+
+#include <asm/pgtable_64_types.h>
+#include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/stackdepot.h>
+#include <linux/stacktrace.h>
+#include <linux/nmi.h>
+#include <linux/mm.h>
+#include <linux/printk.h>
+
+#define KMSAN_ALLOCA_MAGIC_ORIGIN 0xabcd0100
+#define KMSAN_CHAIN_MAGIC_ORIGIN 0xabcd0200
+
+#define KMSAN_POISON_NOCHECK 0x0
+#define KMSAN_POISON_CHECK 0x1
+#define KMSAN_POISON_FREE 0x2
+
+#define KMSAN_ORIGIN_SIZE 4
+#define KMSAN_MAX_ORIGIN_DEPTH 7
+
+#define KMSAN_STACK_DEPTH 64
+
+#define KMSAN_META_SHADOW (false)
+#define KMSAN_META_ORIGIN (true)
+
+extern bool kmsan_enabled;
+extern int panic_on_kmsan;
+
+/*
+ * KMSAN performs a lot of consistency checks that are currently enabled by
+ * default. BUG_ON is normally discouraged in the kernel, unless used for
+ * debugging, but KMSAN itself is a debugging tool, so it makes little sense to
+ * recover if something goes wrong.
+ */
+#define KMSAN_WARN_ON(cond) \
+ ({ \
+ const bool __cond = WARN_ON(cond); \
+ if (unlikely(__cond)) { \
+ WRITE_ONCE(kmsan_enabled, false); \
+ if (panic_on_kmsan) { \
+ /* Can't call panic() here because */ \
+ /* of uaccess checks. */ \
+ BUG(); \
+ } \
+ } \
+ __cond; \
+ })
+
+/*
+ * A pair of metadata pointers to be returned by the instrumentation functions.
+ */
+struct shadow_origin_ptr {
+ void *shadow, *origin;
+};
+
+struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *addr, u64 size,
+ bool store);
+void *kmsan_get_metadata(void *addr, bool is_origin);
+void __init kmsan_init_alloc_meta_for_range(void *start, void *end);
+
+enum kmsan_bug_reason {
+ REASON_ANY,
+ REASON_COPY_TO_USER,
+ REASON_SUBMIT_URB,
+};
+
+void kmsan_print_origin(depot_stack_handle_t origin);
+
+/**
+ * kmsan_report() - Report a use of uninitialized value.
+ * @origin: Stack ID of the uninitialized value.
+ * @address: Address at which the memory access happens.
+ * @size: Memory access size.
+ * @off_first: Offset (from @address) of the first byte to be reported.
+ * @off_last: Offset (from @address) of the last byte to be reported.
+ * @user_addr: When non-NULL, denotes the userspace address to which the kernel
+ * is leaking data.
+ * @reason: Error type from enum kmsan_bug_reason.
+ *
+ * kmsan_report() prints an error message for a consecutive group of bytes
+ * sharing the same origin. If an uninitialized value is used in a comparison,
+ * this function is called once without specifying the addresses. When checking
+ * a memory range, KMSAN may call kmsan_report() multiple times with the same
+ * @address, @size, @user_addr and @reason, but different @off_first and
+ * @off_last corresponding to different @origin values.
+ */
+void kmsan_report(depot_stack_handle_t origin, void *address, int size,
+ int off_first, int off_last, const void *user_addr,
+ enum kmsan_bug_reason reason);
+
+DECLARE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);
+
+static __always_inline struct kmsan_ctx *kmsan_get_context(void)
+{
+ return in_task() ? &current->kmsan_ctx : raw_cpu_ptr(&kmsan_percpu_ctx);
+}
+
+/*
+ * When a compiler hook or KMSAN runtime function is invoked, it may make a
+ * call to instrumented code and eventually call itself recursively. To avoid
+ * that, we guard the runtime entry regions with
+ * kmsan_enter_runtime()/kmsan_leave_runtime() and exit the hook if
+ * kmsan_in_runtime() is true.
+ *
+ * Non-runtime code may occasionally get executed in nested IRQs from the
+ * runtime code (e.g. when called via smp_call_function_single()). Because some
+ * KMSAN routines may take locks (e.g. for memory allocation), we conservatively
+ * bail out instead of calling them. To minimize the effect of this (potentially
+ * missing initialization events) kmsan_in_runtime() is not checked in
+ * non-blocking runtime functions.
+ */
+static __always_inline bool kmsan_in_runtime(void)
+{
+ if ((hardirq_count() >> HARDIRQ_SHIFT) > 1)
+ return true;
+ return kmsan_get_context()->kmsan_in_runtime;
+}
+
+static __always_inline void kmsan_enter_runtime(void)
+{
+ struct kmsan_ctx *ctx;
+
+ ctx = kmsan_get_context();
+ KMSAN_WARN_ON(ctx->kmsan_in_runtime++);
+}
+
+static __always_inline void kmsan_leave_runtime(void)
+{
+ struct kmsan_ctx *ctx = kmsan_get_context();
+
+ KMSAN_WARN_ON(--ctx->kmsan_in_runtime);
+}
+
+depot_stack_handle_t kmsan_save_stack(void);
+depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
+ unsigned int extra_bits);
+
+/*
+ * Pack and unpack the origin chain depth and UAF flag to/from the extra bits
+ * provided by the stack depot.
+ * The UAF flag is stored in the lowest bit, followed by the depth in the upper
+ * bits.
+ * set_dsh_extra_bits() is responsible for clamping the value.
+ */
+static __always_inline unsigned int kmsan_extra_bits(unsigned int depth,
+ bool uaf)
+{
+ return (depth << 1) | uaf;
+}
+
+static __always_inline bool kmsan_uaf_from_eb(unsigned int extra_bits)
+{
+ return extra_bits & 1;
+}
+
+static __always_inline unsigned int kmsan_depth_from_eb(unsigned int extra_bits)
+{
+ return extra_bits >> 1;
+}
+
+/*
+ * kmsan_internal_ functions are supposed to be very simple and not require the
+ * kmsan_in_runtime() checks.
+ */
+void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n);
+void kmsan_internal_poison_memory(void *address, size_t size, gfp_t flags,
+ unsigned int poison_flags);
+void kmsan_internal_unpoison_memory(void *address, size_t size, bool checked);
+void kmsan_internal_set_shadow_origin(void *address, size_t size, int b,
+ u32 origin, bool checked);
+depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id);
+
+void kmsan_internal_task_create(struct task_struct *task);
+
+bool kmsan_metadata_is_contiguous(void *addr, size_t size);
+void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
+ int reason);
+
+struct page *kmsan_vmalloc_to_page_or_null(void *vaddr);
+void kmsan_setup_meta(struct page *page, struct page *shadow,
+ struct page *origin, int order);
+
+/*
+ * kmsan_internal_is_module_addr() and kmsan_internal_is_vmalloc_addr() are
+ * non-instrumented versions of is_module_address() and is_vmalloc_addr() that
+ * are safe to call from KMSAN runtime without recursion.
+ */
+static inline bool kmsan_internal_is_module_addr(void *vaddr)
+{
+ return ((u64)vaddr >= MODULES_VADDR) && ((u64)vaddr < MODULES_END);
+}
+
+static inline bool kmsan_internal_is_vmalloc_addr(void *addr)
+{
+ return ((u64)addr >= VMALLOC_START) && ((u64)addr < VMALLOC_END);
+}
+
+#endif /* __MM_KMSAN_KMSAN_H */
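
The kmsan_extra_bits()/kmsan_uaf_from_eb()/kmsan_depth_from_eb() helpers above pack the origin chain depth and the use-after-free flag into the stack depot's extra bits. A minimal stand-alone sketch of that round trip (plain C with the same bit layout, independent of the kernel) looks like this:

    #include <assert.h>
    #include <stdbool.h>

    /* Depth goes into the upper bits, the UAF flag into bit 0. */
    static unsigned int pack_extra_bits(unsigned int depth, bool uaf)
    {
            return (depth << 1) | uaf;
    }

    int main(void)
    {
            unsigned int eb = pack_extra_bits(3, true);

            assert((eb & 1) == 1);  /* mirrors kmsan_uaf_from_eb()   */
            assert((eb >> 1) == 3); /* mirrors kmsan_depth_from_eb() */
            return 0;
    }
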
diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
new file mode 100644
index 000000000000..9a29ea2dbfb9
--- /dev/null
+++ b/mm/kmsan/kmsan_test.c
@@ -0,0 +1,581 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for KMSAN.
+ * Each test case checks the presence (or absence) of generated reports.
+ * Relies on 'console' tracepoint to capture reports as they appear in the
+ * kernel log.
+ *
+ * Copyright (C) 2021-2022, Google LLC.
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#include <kunit/test.h>
+#include "kmsan.h"
+
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kmsan.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/tracepoint.h>
+#include <trace/events/printk.h>
+
+static DEFINE_PER_CPU(int, per_cpu_var);
+
+/* Report as observed from console. */
+static struct {
+ spinlock_t lock;
+ bool available;
+ bool ignore; /* Stop console output collection. */
+ char header[256];
+} observed = {
+ .lock = __SPIN_LOCK_UNLOCKED(observed.lock),
+};
+
+/* Probe for console output: obtains observed lines of interest. */
+static void probe_console(void *ignore, const char *buf, size_t len)
+{
+ unsigned long flags;
+
+ if (observed.ignore)
+ return;
+ spin_lock_irqsave(&observed.lock, flags);
+
+ if (strnstr(buf, "BUG: KMSAN: ", len)) {
+ /*
+ * This is a KMSAN report related to the test.
+ *
+ * The provided @buf is not NUL-terminated; copy no more than
+ * @len bytes and let strscpy() add the missing NUL-terminator.
+ */
+ strscpy(observed.header, buf,
+ min(len + 1, sizeof(observed.header)));
+ WRITE_ONCE(observed.available, true);
+ observed.ignore = true;
+ }
+ spin_unlock_irqrestore(&observed.lock, flags);
+}
+
+/* Check if a report related to the test exists. */
+static bool report_available(void)
+{
+ return READ_ONCE(observed.available);
+}
+
+/* Information we expect in a report. */
+struct expect_report {
+ const char *error_type; /* Error type. */
+ /*
+ * Kernel symbol from the error header, or NULL if no report is
+ * expected.
+ */
+ const char *symbol;
+};
+
+/* Check observed report matches information in @r. */
+static bool report_matches(const struct expect_report *r)
+{
+ typeof(observed.header) expected_header;
+ unsigned long flags;
+ bool ret = false;
+ const char *end;
+ char *cur;
+
+ /* Double-checked locking. */
+ if (!report_available() || !r->symbol)
+ return (!report_available() && !r->symbol);
+
+ /* Generate expected report contents. */
+
+ /* Title */
+ cur = expected_header;
+ end = &expected_header[sizeof(expected_header) - 1];
+
+ cur += scnprintf(cur, end - cur, "BUG: KMSAN: %s", r->error_type);
+
+ scnprintf(cur, end - cur, " in %s", r->symbol);
+ /* The exact offset won't match, remove it; also strip module name. */
+ cur = strchr(expected_header, '+');
+ if (cur)
+ *cur = '\0';
+
+ spin_lock_irqsave(&observed.lock, flags);
+ if (!report_available())
+ goto out; /* A new report is being captured. */
+
+ /* Finally match expected output to what we actually observed. */
+ ret = strstr(observed.header, expected_header);
+out:
+ spin_unlock_irqrestore(&observed.lock, flags);
+
+ return ret;
+}
+
+/* ===== Test cases ===== */
+
+/* Prevent replacing branch with select in LLVM. */
+static noinline void check_true(char *arg)
+{
+ pr_info("%s is true\n", arg);
+}
+
+static noinline void check_false(char *arg)
+{
+ pr_info("%s is false\n", arg);
+}
+
+#define USE(x) \
+ do { \
+ if (x) \
+ check_true(#x); \
+ else \
+ check_false(#x); \
+ } while (0)
+
+#define EXPECTATION_ETYPE_FN(e, reason, fn) \
+ struct expect_report e = { \
+ .error_type = reason, \
+ .symbol = fn, \
+ }
+
+#define EXPECTATION_NO_REPORT(e) EXPECTATION_ETYPE_FN(e, NULL, NULL)
+#define EXPECTATION_UNINIT_VALUE_FN(e, fn) \
+ EXPECTATION_ETYPE_FN(e, "uninit-value", fn)
+#define EXPECTATION_UNINIT_VALUE(e) EXPECTATION_UNINIT_VALUE_FN(e, __func__)
+#define EXPECTATION_USE_AFTER_FREE(e) \
+ EXPECTATION_ETYPE_FN(e, "use-after-free", __func__)
+
+/* Test case: ensure that kmalloc() returns uninitialized memory. */
+static void test_uninit_kmalloc(struct kunit *test)
+{
+ EXPECTATION_UNINIT_VALUE(expect);
+ int *ptr;
+
+ kunit_info(test, "uninitialized kmalloc test (UMR report)\n");
+ ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
+ USE(*ptr);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/*
+ * Test case: ensure that kmalloc'ed memory becomes initialized after memset().
+ */
+static void test_init_kmalloc(struct kunit *test)
+{
+ EXPECTATION_NO_REPORT(expect);
+ int *ptr;
+
+ kunit_info(test, "initialized kmalloc test (no reports)\n");
+ ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
+ memset(ptr, 0, sizeof(*ptr));
+ USE(*ptr);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/* Test case: ensure that kzalloc() returns initialized memory. */
+static void test_init_kzalloc(struct kunit *test)
+{
+ EXPECTATION_NO_REPORT(expect);
+ int *ptr;
+
+ kunit_info(test, "initialized kzalloc test (no reports)\n");
+ ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+ USE(*ptr);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/* Test case: ensure that local variables are uninitialized by default. */
+static void test_uninit_stack_var(struct kunit *test)
+{
+ EXPECTATION_UNINIT_VALUE(expect);
+ volatile int cond;
+
+ kunit_info(test, "uninitialized stack variable (UMR report)\n");
+ USE(cond);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/* Test case: ensure that local variables with initializers are initialized. */
+static void test_init_stack_var(struct kunit *test)
+{
+ EXPECTATION_NO_REPORT(expect);
+ volatile int cond = 1;
+
+ kunit_info(test, "initialized stack variable (no reports)\n");
+ USE(cond);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+static noinline void two_param_fn_2(int arg1, int arg2)
+{
+ USE(arg1);
+ USE(arg2);
+}
+
+static noinline void one_param_fn(int arg)
+{
+ two_param_fn_2(arg, arg);
+ USE(arg);
+}
+
+static noinline void two_param_fn(int arg1, int arg2)
+{
+ int init = 0;
+
+ one_param_fn(init);
+ USE(arg1);
+ USE(arg2);
+}
+
+static void test_params(struct kunit *test)
+{
+#ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL
+ /*
+ * With eager param/retval checking enabled, KMSAN will report an error
+ * before the call to two_param_fn().
+ */
+ EXPECTATION_UNINIT_VALUE_FN(expect, "test_params");
+#else
+ EXPECTATION_UNINIT_VALUE_FN(expect, "two_param_fn");
+#endif
+ volatile int uninit, init = 1;
+
+ kunit_info(test,
+ "uninit passed through a function parameter (UMR report)\n");
+ two_param_fn(uninit, init);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+static int signed_sum3(int a, int b, int c)
+{
+ return a + b + c;
+}
+
+/*
+ * Test case: ensure that uninitialized values are tracked through function
+ * arguments.
+ */
+static void test_uninit_multiple_params(struct kunit *test)
+{
+ EXPECTATION_UNINIT_VALUE(expect);
+ volatile char b = 3, c;
+ volatile int a;
+
+ kunit_info(test, "uninitialized local passed to fn (UMR report)\n");
+ USE(signed_sum3(a, b, c));
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/* Helper function to make an array uninitialized. */
+static noinline void do_uninit_local_array(char *array, int start, int stop)
+{
+ volatile char uninit;
+
+ for (int i = start; i < stop; i++)
+ array[i] = uninit;
+}
+
+/*
+ * Test case: ensure kmsan_check_memory() reports an error when checking
+ * uninitialized memory.
+ */
+static void test_uninit_kmsan_check_memory(struct kunit *test)
+{
+ EXPECTATION_UNINIT_VALUE_FN(expect, "test_uninit_kmsan_check_memory");
+ volatile char local_array[8];
+
+ kunit_info(
+ test,
+ "kmsan_check_memory() called on uninit local (UMR report)\n");
+ do_uninit_local_array((char *)local_array, 5, 7);
+
+ kmsan_check_memory((char *)local_array, 8);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/*
+ * Test case: check that a virtual memory range created with vmap() from
+ * initialized pages is still considered initialized.
+ */
+static void test_init_kmsan_vmap_vunmap(struct kunit *test)
+{
+ EXPECTATION_NO_REPORT(expect);
+ const int npages = 2;
+ struct page **pages;
+ void *vbuf;
+
+ kunit_info(test, "pages initialized via vmap (no reports)\n");
+
+ pages = kmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
+ for (int i = 0; i < npages; i++)
+ pages[i] = alloc_page(GFP_KERNEL);
+ vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+ memset(vbuf, 0xfe, npages * PAGE_SIZE);
+ for (int i = 0; i < npages; i++)
+ kmsan_check_memory(page_address(pages[i]), PAGE_SIZE);
+
+ if (vbuf)
+ vunmap(vbuf);
+ for (int i = 0; i < npages; i++) {
+ if (pages[i])
+ __free_page(pages[i]);
+ }
+ kfree(pages);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/*
+ * Test case: ensure that memset() can initialize a buffer allocated via
+ * vmalloc().
+ */
+static void test_init_vmalloc(struct kunit *test)
+{
+ EXPECTATION_NO_REPORT(expect);
+ int npages = 8;
+ char *buf;
+
+ kunit_info(test, "vmalloc buffer can be initialized (no reports)\n");
+ buf = vmalloc(PAGE_SIZE * npages);
+ buf[0] = 1;
+ memset(buf, 0xfe, PAGE_SIZE * npages);
+ USE(buf[0]);
+ for (int i = 0; i < npages; i++)
+ kmsan_check_memory(&buf[PAGE_SIZE * i], PAGE_SIZE);
+ vfree(buf);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/* Test case: ensure that use-after-free reporting works. */
+static void test_uaf(struct kunit *test)
+{
+ EXPECTATION_USE_AFTER_FREE(expect);
+ volatile int value;
+ volatile int *var;
+
+ kunit_info(test, "use-after-free in kmalloc-ed buffer (UMR report)\n");
+ var = kmalloc(80, GFP_KERNEL);
+ var[3] = 0xfeedface;
+ kfree((int *)var);
+ /* Copy the invalid value before checking it. */
+ value = var[3];
+ USE(value);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/*
+ * Test case: ensure that uninitialized values are propagated through per-CPU
+ * memory.
+ */
+static void test_percpu_propagate(struct kunit *test)
+{
+ EXPECTATION_UNINIT_VALUE(expect);
+ volatile int uninit, check;
+
+ kunit_info(test,
+ "uninit local stored to per_cpu memory (UMR report)\n");
+
+ this_cpu_write(per_cpu_var, uninit);
+ check = this_cpu_read(per_cpu_var);
+ USE(check);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/*
+ * Test case: ensure that passing uninitialized values to printk() leads to an
+ * error report.
+ */
+static void test_printk(struct kunit *test)
+{
+#ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL
+ /*
+ * With eager param/retval checking enabled, KMSAN will report an error
+ * before the call to pr_info().
+ */
+ EXPECTATION_UNINIT_VALUE_FN(expect, "test_printk");
+#else
+ EXPECTATION_UNINIT_VALUE_FN(expect, "number");
+#endif
+ volatile int uninit;
+
+ kunit_info(test, "uninit local passed to pr_info() (UMR report)\n");
+ pr_info("%px contains %d\n", &uninit, uninit);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/*
+ * Test case: ensure that memcpy() correctly copies uninitialized values between
+ * aligned `src` and `dst`.
+ */
+static void test_memcpy_aligned_to_aligned(struct kunit *test)
+{
+ EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_aligned");
+ volatile int uninit_src;
+ volatile int dst = 0;
+
+ kunit_info(
+ test,
+ "memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
+ memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
+ kmsan_check_memory((void *)&dst, sizeof(dst));
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/*
+ * Test case: ensure that memcpy() correctly copies uninitialized values between
+ * aligned `src` and unaligned `dst`.
+ *
+ * Copying an aligned 4-byte value to an unaligned one leads to touching two
+ * aligned 4-byte values. This test case checks that KMSAN correctly reports an
+ * error on the first of the two values.
+ */
+static void test_memcpy_aligned_to_unaligned(struct kunit *test)
+{
+ EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_aligned_to_unaligned");
+ volatile int uninit_src;
+ volatile char dst[8] = { 0 };
+
+ kunit_info(
+ test,
+ "memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
+ memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
+ kmsan_check_memory((void *)dst, 4);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+/*
+ * Test case: ensure that memcpy() correctly copies uninitialized values between
+ * aligned `src` and unaligned `dst`.
+ *
+ * Copying an aligned 4-byte value to an unaligned one leads to touching two
+ * aligned 4-byte values. This test case checks that KMSAN correctly reports an
+ * error on the second of the two values.
+ */
+static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
+{
+ EXPECTATION_UNINIT_VALUE_FN(expect,
+ "test_memcpy_aligned_to_unaligned2");
+ volatile int uninit_src;
+ volatile char dst[8] = { 0 };
+
+ kunit_info(
+ test,
+ "memcpy()ing aligned uninit src to unaligned dst - part 2 (UMR report)\n");
+ memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
+ kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+static noinline void fibonacci(int *array, int size, int start)
+{
+ if (start < 2 || (start == size))
+ return;
+ array[start] = array[start - 1] + array[start - 2];
+ fibonacci(array, size, start + 1);
+}
+
+static void test_long_origin_chain(struct kunit *test)
+{
+ EXPECTATION_UNINIT_VALUE_FN(expect,
+ "test_long_origin_chain");
+ /* (KMSAN_MAX_ORIGIN_DEPTH * 2) recursive calls to fibonacci(). */
+ volatile int accum[KMSAN_MAX_ORIGIN_DEPTH * 2 + 2];
+ int last = ARRAY_SIZE(accum) - 1;
+
+ kunit_info(
+ test,
+ "origin chain exceeding KMSAN_MAX_ORIGIN_DEPTH (UMR report)\n");
+ /*
+ * We do not set accum[1] to 0, so its uninitialized state is carried
+ * over to accum[2..last].
+ */
+ accum[0] = 1;
+ fibonacci((int *)accum, ARRAY_SIZE(accum), 2);
+ kmsan_check_memory((void *)&accum[last], sizeof(int));
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+}
+
+static struct kunit_case kmsan_test_cases[] = {
+ KUNIT_CASE(test_uninit_kmalloc),
+ KUNIT_CASE(test_init_kmalloc),
+ KUNIT_CASE(test_init_kzalloc),
+ KUNIT_CASE(test_uninit_stack_var),
+ KUNIT_CASE(test_init_stack_var),
+ KUNIT_CASE(test_params),
+ KUNIT_CASE(test_uninit_multiple_params),
+ KUNIT_CASE(test_uninit_kmsan_check_memory),
+ KUNIT_CASE(test_init_kmsan_vmap_vunmap),
+ KUNIT_CASE(test_init_vmalloc),
+ KUNIT_CASE(test_uaf),
+ KUNIT_CASE(test_percpu_propagate),
+ KUNIT_CASE(test_printk),
+ KUNIT_CASE(test_memcpy_aligned_to_aligned),
+ KUNIT_CASE(test_memcpy_aligned_to_unaligned),
+ KUNIT_CASE(test_memcpy_aligned_to_unaligned2),
+ KUNIT_CASE(test_long_origin_chain),
+ {},
+};
+
+/* ===== End test cases ===== */
+
+static int test_init(struct kunit *test)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&observed.lock, flags);
+ observed.header[0] = '\0';
+ observed.ignore = false;
+ observed.available = false;
+ spin_unlock_irqrestore(&observed.lock, flags);
+
+ return 0;
+}
+
+static void test_exit(struct kunit *test)
+{
+}
+
+static void register_tracepoints(struct tracepoint *tp, void *ignore)
+{
+ check_trace_callback_type_console(probe_console);
+ if (!strcmp(tp->name, "console"))
+ WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
+}
+
+static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
+{
+ if (!strcmp(tp->name, "console"))
+ tracepoint_probe_unregister(tp, probe_console, NULL);
+}
+
+static int kmsan_suite_init(struct kunit_suite *suite)
+{
+ /*
+ * Because we want to be able to build the test as a module, we need to
+ * iterate through all known tracepoints, since the static registration
+ * won't work here.
+ */
+ for_each_kernel_tracepoint(register_tracepoints, NULL);
+ return 0;
+}
+
+static void kmsan_suite_exit(struct kunit_suite *suite)
+{
+ for_each_kernel_tracepoint(unregister_tracepoints, NULL);
+ tracepoint_synchronize_unregister();
+}
+
+static struct kunit_suite kmsan_test_suite = {
+ .name = "kmsan",
+ .test_cases = kmsan_test_cases,
+ .init = test_init,
+ .exit = test_exit,
+ .suite_init = kmsan_suite_init,
+ .suite_exit = kmsan_suite_exit,
+};
+kunit_test_suites(&kmsan_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Potapenko <glider@google.com>");
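
The KUnit suite above recognizes a test's report by prefix matching: report_matches() formats "BUG: KMSAN: <error_type> in <symbol>", cuts the expected string at '+' so the call offset is ignored, and searches for it in the console line captured by probe_console(). A user-space sketch of that comparison, using hypothetical strings, is:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* Hypothetical captured console line and expected values. */
            const char *observed =
                    "BUG: KMSAN: uninit-value in test_uninit_kmalloc+0x1a/0x60";
            char expected[256];
            char *plus;

            snprintf(expected, sizeof(expected), "BUG: KMSAN: %s in %s",
                     "uninit-value", "test_uninit_kmalloc");
            /* The exact offset never matches, so stop the comparison at '+'. */
            plus = strchr(expected, '+');
            if (plus)
                    *plus = '\0';
            printf("match: %s\n", strstr(observed, expected) ? "yes" : "no");
            return 0;
    }
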
diff --git a/mm/kmsan/report.c b/mm/kmsan/report.c
new file mode 100644
index 000000000000..02736ec757f2
--- /dev/null
+++ b/mm/kmsan/report.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KMSAN error reporting routines.
+ *
+ * Copyright (C) 2019-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/moduleparam.h>
+#include <linux/stackdepot.h>
+#include <linux/stacktrace.h>
+#include <linux/uaccess.h>
+
+#include "kmsan.h"
+
+static DEFINE_RAW_SPINLOCK(kmsan_report_lock);
+#define DESCR_SIZE 128
+/* Protected by kmsan_report_lock */
+static char report_local_descr[DESCR_SIZE];
+int panic_on_kmsan __read_mostly;
+
+#ifdef MODULE_PARAM_PREFIX
+#undef MODULE_PARAM_PREFIX
+#endif
+#define MODULE_PARAM_PREFIX "kmsan."
+module_param_named(panic, panic_on_kmsan, int, 0);
+
+/*
+ * Skip internal KMSAN frames.
+ */
+static int get_stack_skipnr(const unsigned long stack_entries[],
+ int num_entries)
+{
+ int len, skip;
+ char buf[64];
+
+ for (skip = 0; skip < num_entries; ++skip) {
+ len = scnprintf(buf, sizeof(buf), "%ps",
+ (void *)stack_entries[skip]);
+
+ /* Never show __msan_* or kmsan_* functions. */
+ if ((strnstr(buf, "__msan_", len) == buf) ||
+ (strnstr(buf, "kmsan_", len) == buf))
+ continue;
+
+ /*
+ * No match for runtime functions -- @skip entries to skip to
+ * get to the first frame of interest.
+ */
+ break;
+ }
+
+ return skip;
+}
+
+/*
+ * Currently the descriptions of locals generated by Clang look as follows:
+ * ----local_name@function_name
+ * We want to print only the name of the local, as other information in that
+ * description can be confusing.
+ * The meaningful part of the description is copied to a global buffer to avoid
+ * allocating memory.
+ */
+static char *pretty_descr(char *descr)
+{
+ int pos = 0, len = strlen(descr);
+
+ for (int i = 0; i < len; i++) {
+ if (descr[i] == '@')
+ break;
+ if (descr[i] == '-')
+ continue;
+ report_local_descr[pos] = descr[i];
+ if (pos + 1 == DESCR_SIZE)
+ break;
+ pos++;
+ }
+ report_local_descr[pos] = 0;
+ return report_local_descr;
+}
+
+void kmsan_print_origin(depot_stack_handle_t origin)
+{
+ unsigned long *entries = NULL, *chained_entries = NULL;
+ unsigned int nr_entries, chained_nr_entries, skipnr;
+ void *pc1 = NULL, *pc2 = NULL;
+ depot_stack_handle_t head;
+ unsigned long magic;
+ char *descr = NULL;
+ unsigned int depth;
+
+ if (!origin)
+ return;
+
+ while (true) {
+ nr_entries = stack_depot_fetch(origin, &entries);
+ depth = kmsan_depth_from_eb(stack_depot_get_extra_bits(origin));
+ magic = nr_entries ? entries[0] : 0;
+ if ((nr_entries == 4) && (magic == KMSAN_ALLOCA_MAGIC_ORIGIN)) {
+ descr = (char *)entries[1];
+ pc1 = (void *)entries[2];
+ pc2 = (void *)entries[3];
+ pr_err("Local variable %s created at:\n",
+ pretty_descr(descr));
+ if (pc1)
+ pr_err(" %pSb\n", pc1);
+ if (pc2)
+ pr_err(" %pSb\n", pc2);
+ break;
+ }
+ if ((nr_entries == 3) && (magic == KMSAN_CHAIN_MAGIC_ORIGIN)) {
+ /*
+ * Origin chains deeper than KMSAN_MAX_ORIGIN_DEPTH are
+ * not stored, so the output may be incomplete.
+ */
+ if (depth == KMSAN_MAX_ORIGIN_DEPTH)
+ pr_err("<Zero or more stacks not recorded to save memory>\n\n");
+ head = entries[1];
+ origin = entries[2];
+ pr_err("Uninit was stored to memory at:\n");
+ chained_nr_entries =
+ stack_depot_fetch(head, &chained_entries);
+ kmsan_internal_unpoison_memory(
+ chained_entries,
+ chained_nr_entries * sizeof(*chained_entries),
+ /*checked*/ false);
+ skipnr = get_stack_skipnr(chained_entries,
+ chained_nr_entries);
+ stack_trace_print(chained_entries + skipnr,
+ chained_nr_entries - skipnr, 0);
+ pr_err("\n");
+ continue;
+ }
+ pr_err("Uninit was created at:\n");
+ if (nr_entries) {
+ skipnr = get_stack_skipnr(entries, nr_entries);
+ stack_trace_print(entries + skipnr, nr_entries - skipnr,
+ 0);
+ } else {
+ pr_err("(stack is not available)\n");
+ }
+ break;
+ }
+}
+
+void kmsan_report(depot_stack_handle_t origin, void *address, int size,
+ int off_first, int off_last, const void *user_addr,
+ enum kmsan_bug_reason reason)
+{
+ unsigned long stack_entries[KMSAN_STACK_DEPTH];
+ int num_stack_entries, skipnr;
+ char *bug_type = NULL;
+ unsigned long ua_flags;
+ bool is_uaf;
+
+ if (!kmsan_enabled)
+ return;
+ if (!current->kmsan_ctx.allow_reporting)
+ return;
+ if (!origin)
+ return;
+
+ current->kmsan_ctx.allow_reporting = false;
+ ua_flags = user_access_save();
+ raw_spin_lock(&kmsan_report_lock);
+ pr_err("=====================================================\n");
+ is_uaf = kmsan_uaf_from_eb(stack_depot_get_extra_bits(origin));
+ switch (reason) {
+ case REASON_ANY:
+ bug_type = is_uaf ? "use-after-free" : "uninit-value";
+ break;
+ case REASON_COPY_TO_USER:
+ bug_type = is_uaf ? "kernel-infoleak-after-free" :
+ "kernel-infoleak";
+ break;
+ case REASON_SUBMIT_URB:
+ bug_type = is_uaf ? "kernel-usb-infoleak-after-free" :
+ "kernel-usb-infoleak";
+ break;
+ }
+
+ num_stack_entries =
+ stack_trace_save(stack_entries, KMSAN_STACK_DEPTH, 1);
+ skipnr = get_stack_skipnr(stack_entries, num_stack_entries);
+
+ pr_err("BUG: KMSAN: %s in %pSb\n", bug_type,
+ (void *)stack_entries[skipnr]);
+ stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr,
+ 0);
+ pr_err("\n");
+
+ kmsan_print_origin(origin);
+
+ if (size) {
+ pr_err("\n");
+ if (off_first == off_last)
+ pr_err("Byte %d of %d is uninitialized\n", off_first,
+ size);
+ else
+ pr_err("Bytes %d-%d of %d are uninitialized\n",
+ off_first, off_last, size);
+ }
+ if (address)
+ pr_err("Memory access of size %d starts at %px\n", size,
+ address);
+ if (user_addr && reason == REASON_COPY_TO_USER)
+ pr_err("Data copied to user address %px\n", user_addr);
+ pr_err("\n");
+ dump_stack_print_info(KERN_ERR);
+ pr_err("=====================================================\n");
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+ raw_spin_unlock(&kmsan_report_lock);
+ if (panic_on_kmsan)
+ panic("kmsan.panic set ...\n");
+ user_access_restore(ua_flags);
+ current->kmsan_ctx.allow_reporting = true;
+}
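
get_stack_skipnr() above trims the runtime's own frames so a report starts at the first frame of interest: any frame whose symbolized name begins with "__msan_" or "kmsan_" is skipped. A small sketch of the same filtering over a hypothetical list of frame names:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* Hypothetical symbolized stack, innermost frame first. */
            const char *frames[] = {
                    "__msan_metadata_ptr_for_load_4",
                    "kmsan_get_metadata",
                    "copy_to_user",
                    "do_syscall_64",
            };
            int nr = sizeof(frames) / sizeof(frames[0]);
            int skip = 0;

            while (skip < nr &&
                   (!strncmp(frames[skip], "__msan_", 7) ||
                    !strncmp(frames[skip], "kmsan_", 6)))
                    skip++;
            if (skip < nr)
                    printf("first frame of interest: %s\n", frames[skip]);
            return 0;
    }
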
diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
new file mode 100644
index 000000000000..21e3e196ec3c
--- /dev/null
+++ b/mm/kmsan/shadow.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KMSAN shadow implementation.
+ *
+ * Copyright (C) 2017-2022 Google LLC
+ * Author: Alexander Potapenko <glider@google.com>
+ *
+ */
+
+#include <asm/kmsan.h>
+#include <asm/tlbflush.h>
+#include <linux/cacheflush.h>
+#include <linux/memblock.h>
+#include <linux/mm_types.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+
+#include "../internal.h"
+#include "kmsan.h"
+
+#define shadow_page_for(page) ((page)->kmsan_shadow)
+
+#define origin_page_for(page) ((page)->kmsan_origin)
+
+static void *shadow_ptr_for(struct page *page)
+{
+ return page_address(shadow_page_for(page));
+}
+
+static void *origin_ptr_for(struct page *page)
+{
+ return page_address(origin_page_for(page));
+}
+
+static bool page_has_metadata(struct page *page)
+{
+ return shadow_page_for(page) && origin_page_for(page);
+}
+
+static void set_no_shadow_origin_page(struct page *page)
+{
+ shadow_page_for(page) = NULL;
+ origin_page_for(page) = NULL;
+}
+
+/*
+ * Dummy load and store pages to be used when the real metadata is unavailable.
+ * There are separate pages for loads and stores, so that every load returns a
+ * zero, and every store doesn't affect other loads.
+ */
+static char dummy_load_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+static char dummy_store_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+static unsigned long vmalloc_meta(void *addr, bool is_origin)
+{
+ unsigned long addr64 = (unsigned long)addr, off;
+
+ KMSAN_WARN_ON(is_origin && !IS_ALIGNED(addr64, KMSAN_ORIGIN_SIZE));
+ if (kmsan_internal_is_vmalloc_addr(addr)) {
+ off = addr64 - VMALLOC_START;
+ return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START :
+ KMSAN_VMALLOC_SHADOW_START);
+ }
+ if (kmsan_internal_is_module_addr(addr)) {
+ off = addr64 - MODULES_VADDR;
+ return off + (is_origin ? KMSAN_MODULES_ORIGIN_START :
+ KMSAN_MODULES_SHADOW_START);
+ }
+ return 0;
+}
+
+static struct page *virt_to_page_or_null(void *vaddr)
+{
+ if (kmsan_virt_addr_valid(vaddr))
+ return virt_to_page(vaddr);
+ else
+ return NULL;
+}
+
+struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *address, u64 size,
+ bool store)
+{
+ struct shadow_origin_ptr ret;
+ void *shadow;
+
+ /*
+ * Even if we redirect this memory access to the dummy page, it will
+ * go out of bounds.
+ */
+ KMSAN_WARN_ON(size > PAGE_SIZE);
+
+ if (!kmsan_enabled)
+ goto return_dummy;
+
+ KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(address, size));
+ shadow = kmsan_get_metadata(address, KMSAN_META_SHADOW);
+ if (!shadow)
+ goto return_dummy;
+
+ ret.shadow = shadow;
+ ret.origin = kmsan_get_metadata(address, KMSAN_META_ORIGIN);
+ return ret;
+
+return_dummy:
+ if (store) {
+ /* Ignore this store. */
+ ret.shadow = dummy_store_page;
+ ret.origin = dummy_store_page;
+ } else {
+ /* This load will return zero. */
+ ret.shadow = dummy_load_page;
+ ret.origin = dummy_load_page;
+ }
+ return ret;
+}
+
+/*
+ * Obtain the shadow or origin pointer for the given address, or NULL if there's
+ * none. The caller must check the return value for being non-NULL if needed.
+ * The return value of this function should not depend on whether we're in the
+ * runtime or not.
+ */
+void *kmsan_get_metadata(void *address, bool is_origin)
+{
+ u64 addr = (u64)address, pad, off;
+ struct page *page;
+ void *ret;
+
+ if (is_origin && !IS_ALIGNED(addr, KMSAN_ORIGIN_SIZE)) {
+ pad = addr % KMSAN_ORIGIN_SIZE;
+ addr -= pad;
+ }
+ address = (void *)addr;
+ if (kmsan_internal_is_vmalloc_addr(address) ||
+ kmsan_internal_is_module_addr(address))
+ return (void *)vmalloc_meta(address, is_origin);
+
+ ret = arch_kmsan_get_meta_or_null(address, is_origin);
+ if (ret)
+ return ret;
+
+ page = virt_to_page_or_null(address);
+ if (!page)
+ return NULL;
+ if (!page_has_metadata(page))
+ return NULL;
+ off = addr % PAGE_SIZE;
+
+ return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
+}
+
+void kmsan_copy_page_meta(struct page *dst, struct page *src)
+{
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+ if (!dst || !page_has_metadata(dst))
+ return;
+ if (!src || !page_has_metadata(src)) {
+ kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
+ /*checked*/ false);
+ return;
+ }
+
+ kmsan_enter_runtime();
+ __memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
+ __memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
+ kmsan_leave_runtime();
+}
+
+void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
+{
+ bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled;
+ struct page *shadow, *origin;
+ depot_stack_handle_t handle;
+ int pages = 1 << order;
+
+ if (!page)
+ return;
+
+ shadow = shadow_page_for(page);
+ origin = origin_page_for(page);
+
+ if (initialized) {
+ __memset(page_address(shadow), 0, PAGE_SIZE * pages);
+ __memset(page_address(origin), 0, PAGE_SIZE * pages);
+ return;
+ }
+
+ /* Zero pages allocated by the runtime should also be initialized. */
+ if (kmsan_in_runtime())
+ return;
+
+ __memset(page_address(shadow), -1, PAGE_SIZE * pages);
+ kmsan_enter_runtime();
+ handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0);
+ kmsan_leave_runtime();
+ /*
+ * Addresses are page-aligned, pages are contiguous, so it's ok
+ * to just fill the origin pages with @handle.
+ */
+ for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
+ ((depot_stack_handle_t *)page_address(origin))[i] = handle;
+}
+
+void kmsan_free_page(struct page *page, unsigned int order)
+{
+ if (!kmsan_enabled || kmsan_in_runtime())
+ return;
+ kmsan_enter_runtime();
+ kmsan_internal_poison_memory(page_address(page),
+ PAGE_SIZE << compound_order(page),
+ GFP_KERNEL,
+ KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
+ kmsan_leave_runtime();
+}
+
+void kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages,
+ unsigned int page_shift)
+{
+ unsigned long shadow_start, origin_start, shadow_end, origin_end;
+ struct page **s_pages, **o_pages;
+ int nr, mapped;
+
+ if (!kmsan_enabled)
+ return;
+
+ shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
+ shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
+ if (!shadow_start)
+ return;
+
+ nr = (end - start) / PAGE_SIZE;
+ s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
+ o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
+ if (!s_pages || !o_pages)
+ goto ret;
+ for (int i = 0; i < nr; i++) {
+ s_pages[i] = shadow_page_for(pages[i]);
+ o_pages[i] = origin_page_for(pages[i]);
+ }
+ prot = __pgprot(pgprot_val(prot) | _PAGE_NX);
+ prot = PAGE_KERNEL;
+
+ origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
+ origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN);
+ kmsan_enter_runtime();
+ mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
+ s_pages, page_shift);
+ KMSAN_WARN_ON(mapped);
+ mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
+ o_pages, page_shift);
+ KMSAN_WARN_ON(mapped);
+ kmsan_leave_runtime();
+ flush_tlb_kernel_range(shadow_start, shadow_end);
+ flush_tlb_kernel_range(origin_start, origin_end);
+ flush_cache_vmap(shadow_start, shadow_end);
+ flush_cache_vmap(origin_start, origin_end);
+
+ret:
+ kfree(s_pages);
+ kfree(o_pages);
+}
+
+/* Allocate metadata for pages allocated at boot time. */
+void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
+{
+ struct page *shadow_p, *origin_p;
+ void *shadow, *origin;
+ struct page *page;
+ u64 size;
+
+ start = (void *)ALIGN_DOWN((u64)start, PAGE_SIZE);
+ size = ALIGN((u64)end - (u64)start, PAGE_SIZE);
+ shadow = memblock_alloc(size, PAGE_SIZE);
+ origin = memblock_alloc(size, PAGE_SIZE);
+ for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
+ page = virt_to_page_or_null((char *)start + addr);
+ shadow_p = virt_to_page_or_null((char *)shadow + addr);
+ set_no_shadow_origin_page(shadow_p);
+ shadow_page_for(page) = shadow_p;
+ origin_p = virt_to_page_or_null((char *)origin + addr);
+ set_no_shadow_origin_page(origin_p);
+ origin_page_for(page) = origin_p;
+ }
+}
+
+void kmsan_setup_meta(struct page *page, struct page *shadow,
+ struct page *origin, int order)
+{
+ for (int i = 0; i < (1 << order); i++) {
+ set_no_shadow_origin_page(&shadow[i]);
+ set_no_shadow_origin_page(&origin[i]);
+ shadow_page_for(&page[i]) = &shadow[i];
+ origin_page_for(&page[i]) = &origin[i];
+ }
+}
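
vmalloc_meta() above translates a vmalloc or module address into its shadow or origin counterpart by keeping the offset within the region and rebasing it onto the dedicated metadata range. A sketch of that arithmetic with purely illustrative base addresses (the real values come from asm/kmsan.h):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative constants only, not the kernel's actual layout. */
    #define VMALLOC_BASE 0xffffc90000000000ull
    #define SHADOW_BASE  0xfffff50000000000ull
    #define ORIGIN_BASE  0xfffffa0000000000ull

    int main(void)
    {
            uint64_t addr = VMALLOC_BASE + 0x1234;
            uint64_t off = addr - VMALLOC_BASE;

            printf("shadow: %#llx\n", (unsigned long long)(SHADOW_BASE + off));
            printf("origin: %#llx\n", (unsigned long long)(ORIGIN_BASE + off));
            return 0;
    }
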
diff --git a/mm/ksm.c b/mm/ksm.c
index 42ab153335a2..c19fcca9bc03 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -42,6 +42,7 @@
#include <asm/tlbflush.h>
#include "internal.h"
+#include "mm_slot.h"
#ifdef CONFIG_NUMA
#define NUMA(x) (x)
@@ -82,7 +83,7 @@
* different KSM page copy of that content
*
* Internally, the regular nodes, "dups" and "chains" are represented
- * using the same struct stable_node structure.
+ * using the same struct ksm_stable_node structure.
*
* In addition to the stable tree, KSM uses a second data structure called the
* unstable tree: this tree holds pointers to pages which have been found to
@@ -112,17 +113,13 @@
*/
/**
- * struct mm_slot - ksm information per mm that is being scanned
- * @link: link to the mm_slots hash list
- * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
+ * struct ksm_mm_slot - ksm information per mm that is being scanned
+ * @slot: hash lookup from mm to mm_slot
* @rmap_list: head for this mm_slot's singly-linked list of rmap_items
- * @mm: the mm that this information is valid for
*/
-struct mm_slot {
- struct hlist_node link;
- struct list_head mm_list;
- struct rmap_item *rmap_list;
- struct mm_struct *mm;
+struct ksm_mm_slot {
+ struct mm_slot slot;
+ struct ksm_rmap_item *rmap_list;
};
/**
@@ -135,14 +132,14 @@ struct mm_slot {
* There is only the one ksm_scan instance of this cursor structure.
*/
struct ksm_scan {
- struct mm_slot *mm_slot;
+ struct ksm_mm_slot *mm_slot;
unsigned long address;
- struct rmap_item **rmap_list;
+ struct ksm_rmap_item **rmap_list;
unsigned long seqnr;
};
/**
- * struct stable_node - node of the stable rbtree
+ * struct ksm_stable_node - node of the stable rbtree
* @node: rb node of this ksm page in the stable tree
* @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
* @hlist_dup: linked into the stable_node->hlist with a stable_node chain
@@ -153,7 +150,7 @@ struct ksm_scan {
* @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
* @nid: NUMA node id of stable tree in which linked (may not match kpfn)
*/
-struct stable_node {
+struct ksm_stable_node {
union {
struct rb_node node; /* when node of stable tree */
struct { /* when listed for migration */
@@ -182,7 +179,7 @@ struct stable_node {
};
/**
- * struct rmap_item - reverse mapping item for virtual addresses
+ * struct ksm_rmap_item - reverse mapping item for virtual addresses
* @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
* @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
* @nid: NUMA node id of unstable tree in which linked (may not match page)
@@ -193,8 +190,8 @@ struct stable_node {
* @head: pointer to stable_node heading this list in the stable tree
* @hlist: link into hlist of rmap_items hanging off that stable_node
*/
-struct rmap_item {
- struct rmap_item *rmap_list;
+struct ksm_rmap_item {
+ struct ksm_rmap_item *rmap_list;
union {
struct anon_vma *anon_vma; /* when stable */
#ifdef CONFIG_NUMA
@@ -207,7 +204,7 @@ struct rmap_item {
union {
struct rb_node node; /* when node of unstable tree */
struct { /* when listed from stable tree */
- struct stable_node *head;
+ struct ksm_stable_node *head;
struct hlist_node hlist;
};
};
@@ -230,8 +227,8 @@ static LIST_HEAD(migrate_nodes);
#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
-static struct mm_slot ksm_mm_head = {
- .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
+static struct ksm_mm_slot ksm_mm_head = {
+ .slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
};
static struct ksm_scan ksm_scan = {
.mm_slot = &ksm_mm_head,
@@ -298,21 +295,21 @@ static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);
-#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
+#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
sizeof(struct __struct), __alignof__(struct __struct),\
(__flags), NULL)
static int __init ksm_slab_init(void)
{
- rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
+ rmap_item_cache = KSM_KMEM_CACHE(ksm_rmap_item, 0);
if (!rmap_item_cache)
goto out;
- stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
+ stable_node_cache = KSM_KMEM_CACHE(ksm_stable_node, 0);
if (!stable_node_cache)
goto out_free1;
- mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
+ mm_slot_cache = KSM_KMEM_CACHE(ksm_mm_slot, 0);
if (!mm_slot_cache)
goto out_free2;
@@ -334,18 +331,18 @@ static void __init ksm_slab_free(void)
mm_slot_cache = NULL;
}
-static __always_inline bool is_stable_node_chain(struct stable_node *chain)
+static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
{
return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
}
-static __always_inline bool is_stable_node_dup(struct stable_node *dup)
+static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
{
return dup->head == STABLE_NODE_DUP_HEAD;
}
-static inline void stable_node_chain_add_dup(struct stable_node *dup,
- struct stable_node *chain)
+static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup,
+ struct ksm_stable_node *chain)
{
VM_BUG_ON(is_stable_node_dup(dup));
dup->head = STABLE_NODE_DUP_HEAD;
@@ -354,14 +351,14 @@ static inline void stable_node_chain_add_dup(struct stable_node *dup,
ksm_stable_node_dups++;
}
-static inline void __stable_node_dup_del(struct stable_node *dup)
+static inline void __stable_node_dup_del(struct ksm_stable_node *dup)
{
VM_BUG_ON(!is_stable_node_dup(dup));
hlist_del(&dup->hlist_dup);
ksm_stable_node_dups--;
}
-static inline void stable_node_dup_del(struct stable_node *dup)
+static inline void stable_node_dup_del(struct ksm_stable_node *dup)
{
VM_BUG_ON(is_stable_node_chain(dup));
if (is_stable_node_dup(dup))
@@ -373,9 +370,9 @@ static inline void stable_node_dup_del(struct stable_node *dup)
#endif
}
-static inline struct rmap_item *alloc_rmap_item(void)
+static inline struct ksm_rmap_item *alloc_rmap_item(void)
{
- struct rmap_item *rmap_item;
+ struct ksm_rmap_item *rmap_item;
rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
__GFP_NORETRY | __GFP_NOWARN);
@@ -384,14 +381,15 @@ static inline struct rmap_item *alloc_rmap_item(void)
return rmap_item;
}
-static inline void free_rmap_item(struct rmap_item *rmap_item)
+static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
{
ksm_rmap_items--;
+ rmap_item->mm->ksm_rmap_items--;
rmap_item->mm = NULL; /* debug safety */
kmem_cache_free(rmap_item_cache, rmap_item);
}
-static inline struct stable_node *alloc_stable_node(void)
+static inline struct ksm_stable_node *alloc_stable_node(void)
{
/*
* The allocation can take too long with GFP_KERNEL when memory is under
@@ -401,43 +399,13 @@ static inline struct stable_node *alloc_stable_node(void)
return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
}
-static inline void free_stable_node(struct stable_node *stable_node)
+static inline void free_stable_node(struct ksm_stable_node *stable_node)
{
VM_BUG_ON(stable_node->rmap_hlist_len &&
!is_stable_node_chain(stable_node));
kmem_cache_free(stable_node_cache, stable_node);
}
-static inline struct mm_slot *alloc_mm_slot(void)
-{
- if (!mm_slot_cache) /* initialization failed */
- return NULL;
- return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
-}
-
-static inline void free_mm_slot(struct mm_slot *mm_slot)
-{
- kmem_cache_free(mm_slot_cache, mm_slot);
-}
-
-static struct mm_slot *get_mm_slot(struct mm_struct *mm)
-{
- struct mm_slot *slot;
-
- hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
- if (slot->mm == mm)
- return slot;
-
- return NULL;
-}
-
-static void insert_to_mm_slots_hash(struct mm_struct *mm,
- struct mm_slot *mm_slot)
-{
- mm_slot->mm = mm;
- hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
-}
-
/*
* ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
* page tables after it has passed through ksm_exit() - which, if necessary,
@@ -475,7 +443,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
cond_resched();
page = follow_page(vma, addr,
FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
- if (IS_ERR_OR_NULL(page) || is_zone_device_page(page))
+ if (IS_ERR_OR_NULL(page))
break;
if (PageKsm(page))
ret = handle_mm_fault(vma, addr,
@@ -528,7 +496,7 @@ static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
return vma;
}
-static void break_cow(struct rmap_item *rmap_item)
+static void break_cow(struct ksm_rmap_item *rmap_item)
{
struct mm_struct *mm = rmap_item->mm;
unsigned long addr = rmap_item->address;
@@ -547,7 +515,7 @@ static void break_cow(struct rmap_item *rmap_item)
mmap_read_unlock(mm);
}
-static struct page *get_mergeable_page(struct rmap_item *rmap_item)
+static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
{
struct mm_struct *mm = rmap_item->mm;
unsigned long addr = rmap_item->address;
@@ -560,12 +528,15 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
goto out;
page = follow_page(vma, addr, FOLL_GET);
- if (IS_ERR_OR_NULL(page) || is_zone_device_page(page))
+ if (IS_ERR_OR_NULL(page))
goto out;
+ if (is_zone_device_page(page))
+ goto out_putpage;
if (PageAnon(page)) {
flush_anon_page(vma, page, addr);
flush_dcache_page(page);
} else {
+out_putpage:
put_page(page);
out:
page = NULL;
@@ -585,10 +556,10 @@ static inline int get_kpfn_nid(unsigned long kpfn)
return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}
-static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
+static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
struct rb_root *root)
{
- struct stable_node *chain = alloc_stable_node();
+ struct ksm_stable_node *chain = alloc_stable_node();
VM_BUG_ON(is_stable_node_chain(dup));
if (likely(chain)) {
INIT_HLIST_HEAD(&chain->hlist);
@@ -618,7 +589,7 @@ static struct stable_node *alloc_stable_node_chain(struct stable_node *dup,
return chain;
}
-static inline void free_stable_node_chain(struct stable_node *chain,
+static inline void free_stable_node_chain(struct ksm_stable_node *chain,
struct rb_root *root)
{
rb_erase(&chain->node, root);
@@ -626,9 +597,9 @@ static inline void free_stable_node_chain(struct stable_node *chain,
ksm_stable_node_chains--;
}
-static void remove_node_from_stable_tree(struct stable_node *stable_node)
+static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
{
- struct rmap_item *rmap_item;
+ struct ksm_rmap_item *rmap_item;
/* check it's not STABLE_NODE_CHAIN or negative */
BUG_ON(stable_node->rmap_hlist_len < 0);
@@ -690,7 +661,7 @@ enum get_ksm_page_flags {
* a page to put something that might look like our key in page->mapping.
* is on its way to being freed; but it is an anomaly to bear in mind.
*/
-static struct page *get_ksm_page(struct stable_node *stable_node,
+static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
enum get_ksm_page_flags flags)
{
struct page *page;
@@ -769,10 +740,10 @@ stale:
* Removing rmap_item from stable or unstable tree.
* This function will clean the information from the stable/unstable tree.
*/
-static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
+static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
{
if (rmap_item->address & STABLE_FLAG) {
- struct stable_node *stable_node;
+ struct ksm_stable_node *stable_node;
struct page *page;
stable_node = rmap_item->head;
@@ -819,10 +790,10 @@ out:
cond_resched(); /* we're called from many long loops */
}
-static void remove_trailing_rmap_items(struct rmap_item **rmap_list)
+static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
{
while (*rmap_list) {
- struct rmap_item *rmap_item = *rmap_list;
+ struct ksm_rmap_item *rmap_item = *rmap_list;
*rmap_list = rmap_item->rmap_list;
remove_rmap_item_from_tree(rmap_item);
free_rmap_item(rmap_item);
@@ -859,18 +830,18 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
return err;
}
-static inline struct stable_node *folio_stable_node(struct folio *folio)
+static inline struct ksm_stable_node *folio_stable_node(struct folio *folio)
{
return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
}
-static inline struct stable_node *page_stable_node(struct page *page)
+static inline struct ksm_stable_node *page_stable_node(struct page *page)
{
return folio_stable_node(page_folio(page));
}
static inline void set_page_stable_node(struct page *page,
- struct stable_node *stable_node)
+ struct ksm_stable_node *stable_node)
{
VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page);
page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
@@ -880,7 +851,7 @@ static inline void set_page_stable_node(struct page *page,
/*
* Only called through the sysfs control interface:
*/
-static int remove_stable_node(struct stable_node *stable_node)
+static int remove_stable_node(struct ksm_stable_node *stable_node)
{
struct page *page;
int err;
@@ -918,10 +889,10 @@ static int remove_stable_node(struct stable_node *stable_node)
return err;
}
-static int remove_stable_node_chain(struct stable_node *stable_node,
+static int remove_stable_node_chain(struct ksm_stable_node *stable_node,
struct rb_root *root)
{
- struct stable_node *dup;
+ struct ksm_stable_node *dup;
struct hlist_node *hlist_safe;
if (!is_stable_node_chain(stable_node)) {
@@ -945,14 +916,14 @@ static int remove_stable_node_chain(struct stable_node *stable_node,
static int remove_all_stable_nodes(void)
{
- struct stable_node *stable_node, *next;
+ struct ksm_stable_node *stable_node, *next;
int nid;
int err = 0;
for (nid = 0; nid < ksm_nr_node_ids; nid++) {
while (root_stable_tree[nid].rb_node) {
stable_node = rb_entry(root_stable_tree[nid].rb_node,
- struct stable_node, node);
+ struct ksm_stable_node, node);
if (remove_stable_node_chain(stable_node,
root_stable_tree + nid)) {
err = -EBUSY;
@@ -971,21 +942,25 @@ static int remove_all_stable_nodes(void)
static int unmerge_and_remove_all_rmap_items(void)
{
- struct mm_slot *mm_slot;
+ struct ksm_mm_slot *mm_slot;
+ struct mm_slot *slot;
struct mm_struct *mm;
struct vm_area_struct *vma;
int err = 0;
spin_lock(&ksm_mmlist_lock);
- ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
- struct mm_slot, mm_list);
+ slot = list_entry(ksm_mm_head.slot.mm_node.next,
+ struct mm_slot, mm_node);
+ ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
spin_unlock(&ksm_mmlist_lock);
- for (mm_slot = ksm_scan.mm_slot;
- mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
- mm = mm_slot->mm;
+ for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
+ mm_slot = ksm_scan.mm_slot) {
+ VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);
+
+ mm = mm_slot->slot.mm;
mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
if (ksm_test_exit(mm))
break;
if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
@@ -1000,14 +975,15 @@ static int unmerge_and_remove_all_rmap_items(void)
mmap_read_unlock(mm);
spin_lock(&ksm_mmlist_lock);
- ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
- struct mm_slot, mm_list);
+ slot = list_entry(mm_slot->slot.mm_node.next,
+ struct mm_slot, mm_node);
+ ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
if (ksm_test_exit(mm)) {
- hash_del(&mm_slot->link);
- list_del(&mm_slot->mm_list);
+ hash_del(&mm_slot->slot.hash);
+ list_del(&mm_slot->slot.mm_node);
spin_unlock(&ksm_mmlist_lock);
- free_mm_slot(mm_slot);
+ mm_slot_free(mm_slot_cache, mm_slot);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
mmdrop(mm);
} else
@@ -1095,6 +1071,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
goto out_unlock;
}
+ /* See page_try_share_anon_rmap(): clear PTE first. */
if (anon_exclusive && page_try_share_anon_rmap(page)) {
set_pte_at(mm, pvmw.address, pvmw.pte, entry);
goto out_unlock;
@@ -1133,7 +1110,9 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
struct page *kpage, pte_t orig_pte)
{
struct mm_struct *mm = vma->vm_mm;
+ struct folio *folio;
pmd_t *pmd;
+ pmd_t pmde;
pte_t *ptep;
pte_t newpte;
spinlock_t *ptl;
@@ -1148,6 +1127,15 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
pmd = mm_find_pmd(mm, addr);
if (!pmd)
goto out;
+ /*
+ * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
+ * without holding anon_vma lock for write. So when looking for a
+ * genuine pmde (in which to find pte), test present and !THP together.
+ */
+ pmde = *pmd;
+ barrier();
+ if (!pmd_present(pmde) || pmd_trans_huge(pmde))
+ goto out;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
addr + PAGE_SIZE);
@@ -1191,10 +1179,11 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
ptep_clear_flush(vma, addr, ptep);
set_pte_at_notify(mm, addr, ptep, newpte);
+ folio = page_folio(page);
page_remove_rmap(page, vma, false);
- if (!page_mapped(page))
- try_to_free_swap(page);
- put_page(page);
+ if (!folio_mapped(folio))
+ folio_free_swap(folio);
+ folio_put(folio);
pte_unmap_unlock(ptep, ptl);
err = 0;
@@ -1278,7 +1267,7 @@ out:
*
* This function returns 0 if the pages were merged, -EFAULT otherwise.
*/
-static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
+static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item,
struct page *page, struct page *kpage)
{
struct mm_struct *mm = rmap_item->mm;
@@ -1315,9 +1304,9 @@ out:
* Note that this function upgrades page to ksm page: if one of the pages
* is already a ksm page, try_to_merge_with_ksm_page should be used.
*/
-static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
+static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
struct page *page,
- struct rmap_item *tree_rmap_item,
+ struct ksm_rmap_item *tree_rmap_item,
struct page *tree_page)
{
int err;
@@ -1337,7 +1326,7 @@ static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
}
static __always_inline
-bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset)
+bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset)
{
VM_BUG_ON(stable_node->rmap_hlist_len < 0);
/*
@@ -1351,17 +1340,17 @@ bool __is_page_sharing_candidate(struct stable_node *stable_node, int offset)
}
static __always_inline
-bool is_page_sharing_candidate(struct stable_node *stable_node)
+bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
{
return __is_page_sharing_candidate(stable_node, 0);
}
-static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
- struct stable_node **_stable_node,
+static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
+ struct ksm_stable_node **_stable_node,
struct rb_root *root,
bool prune_stale_stable_nodes)
{
- struct stable_node *dup, *found = NULL, *stable_node = *_stable_node;
+ struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
struct hlist_node *hlist_safe;
struct page *_tree_page, *tree_page = NULL;
int nr = 0;
@@ -1475,7 +1464,7 @@ static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
return tree_page;
}
-static struct stable_node *stable_node_dup_any(struct stable_node *stable_node,
+static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,
struct rb_root *root)
{
if (!is_stable_node_chain(stable_node))
@@ -1502,12 +1491,12 @@ static struct stable_node *stable_node_dup_any(struct stable_node *stable_node,
* function and will be overwritten in all cases, the caller doesn't
* need to initialize it.
*/
-static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
- struct stable_node **_stable_node,
+static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
+ struct ksm_stable_node **_stable_node,
struct rb_root *root,
bool prune_stale_stable_nodes)
{
- struct stable_node *stable_node = *_stable_node;
+ struct ksm_stable_node *stable_node = *_stable_node;
if (!is_stable_node_chain(stable_node)) {
if (is_page_sharing_candidate(stable_node)) {
*_stable_node_dup = stable_node;
@@ -1524,18 +1513,18 @@ static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
prune_stale_stable_nodes);
}
-static __always_inline struct page *chain_prune(struct stable_node **s_n_d,
- struct stable_node **s_n,
+static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d,
+ struct ksm_stable_node **s_n,
struct rb_root *root)
{
return __stable_node_chain(s_n_d, s_n, root, true);
}
-static __always_inline struct page *chain(struct stable_node **s_n_d,
- struct stable_node *s_n,
+static __always_inline struct page *chain(struct ksm_stable_node **s_n_d,
+ struct ksm_stable_node *s_n,
struct rb_root *root)
{
- struct stable_node *old_stable_node = s_n;
+ struct ksm_stable_node *old_stable_node = s_n;
struct page *tree_page;
tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
@@ -1559,8 +1548,8 @@ static struct page *stable_tree_search(struct page *page)
struct rb_root *root;
struct rb_node **new;
struct rb_node *parent;
- struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
- struct stable_node *page_node;
+ struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
+ struct ksm_stable_node *page_node;
page_node = page_stable_node(page);
if (page_node && page_node->head != &migrate_nodes) {
@@ -1580,7 +1569,7 @@ again:
int ret;
cond_resched();
- stable_node = rb_entry(*new, struct stable_node, node);
+ stable_node = rb_entry(*new, struct ksm_stable_node, node);
stable_node_any = NULL;
tree_page = chain_prune(&stable_node_dup, &stable_node, root);
/*
@@ -1803,14 +1792,14 @@ chain_append:
* This function returns the stable tree node just allocated on success,
* NULL otherwise.
*/
-static struct stable_node *stable_tree_insert(struct page *kpage)
+static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
{
int nid;
unsigned long kpfn;
struct rb_root *root;
struct rb_node **new;
struct rb_node *parent;
- struct stable_node *stable_node, *stable_node_dup, *stable_node_any;
+ struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
bool need_chain = false;
kpfn = page_to_pfn(kpage);
@@ -1825,7 +1814,7 @@ again:
int ret;
cond_resched();
- stable_node = rb_entry(*new, struct stable_node, node);
+ stable_node = rb_entry(*new, struct ksm_stable_node, node);
stable_node_any = NULL;
tree_page = chain(&stable_node_dup, stable_node, root);
if (!stable_node_dup) {
@@ -1894,7 +1883,7 @@ again:
rb_insert_color(&stable_node_dup->node, root);
} else {
if (!is_stable_node_chain(stable_node)) {
- struct stable_node *orig = stable_node;
+ struct ksm_stable_node *orig = stable_node;
/* chain is missing so create it */
stable_node = alloc_stable_node_chain(orig, root);
if (!stable_node) {
@@ -1923,7 +1912,7 @@ again:
* the same walking algorithm in an rbtree.
*/
static
-struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
+struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item,
struct page *page,
struct page **tree_pagep)
{
@@ -1937,12 +1926,12 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
new = &root->rb_node;
while (*new) {
- struct rmap_item *tree_rmap_item;
+ struct ksm_rmap_item *tree_rmap_item;
struct page *tree_page;
int ret;
cond_resched();
- tree_rmap_item = rb_entry(*new, struct rmap_item, node);
+ tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node);
tree_page = get_mergeable_page(tree_rmap_item);
if (!tree_page)
return NULL;
@@ -1994,8 +1983,8 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
* rmap_items hanging off a given node of the stable tree, all sharing
* the same ksm page.
*/
-static void stable_tree_append(struct rmap_item *rmap_item,
- struct stable_node *stable_node,
+static void stable_tree_append(struct ksm_rmap_item *rmap_item,
+ struct ksm_stable_node *stable_node,
bool max_page_sharing_bypass)
{
/*
@@ -2037,12 +2026,12 @@ static void stable_tree_append(struct rmap_item *rmap_item,
* @page: the page that we are searching identical page to.
* @rmap_item: the reverse mapping into the virtual address of this page
*/
-static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
+static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
{
struct mm_struct *mm = rmap_item->mm;
- struct rmap_item *tree_rmap_item;
+ struct ksm_rmap_item *tree_rmap_item;
struct page *tree_page = NULL;
- struct stable_node *stable_node;
+ struct ksm_stable_node *stable_node;
struct page *kpage;
unsigned int checksum;
int err;
@@ -2198,11 +2187,11 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
}
}
-static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
- struct rmap_item **rmap_list,
+static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
+ struct ksm_rmap_item **rmap_list,
unsigned long addr)
{
- struct rmap_item *rmap_item;
+ struct ksm_rmap_item *rmap_item;
while (*rmap_list) {
rmap_item = *rmap_list;
@@ -2218,7 +2207,8 @@ static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
rmap_item = alloc_rmap_item();
if (rmap_item) {
/* It has already been zeroed */
- rmap_item->mm = mm_slot->mm;
+ rmap_item->mm = mm_slot->slot.mm;
+ rmap_item->mm->ksm_rmap_items++;
rmap_item->address = addr;
rmap_item->rmap_list = *rmap_list;
*rmap_list = rmap_item;
@@ -2226,19 +2216,21 @@ static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
return rmap_item;
}
-static struct rmap_item *scan_get_next_rmap_item(struct page **page)
+static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
{
struct mm_struct *mm;
+ struct ksm_mm_slot *mm_slot;
struct mm_slot *slot;
struct vm_area_struct *vma;
- struct rmap_item *rmap_item;
+ struct ksm_rmap_item *rmap_item;
+ struct vma_iterator vmi;
int nid;
- if (list_empty(&ksm_mm_head.mm_list))
+ if (list_empty(&ksm_mm_head.slot.mm_node))
return NULL;
- slot = ksm_scan.mm_slot;
- if (slot == &ksm_mm_head) {
+ mm_slot = ksm_scan.mm_slot;
+ if (mm_slot == &ksm_mm_head) {
/*
* A number of pages can hang around indefinitely on per-cpu
* pagevecs, raised page count preventing write_protect_page
@@ -2258,7 +2250,7 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
* so prune them once before each full scan.
*/
if (!ksm_merge_across_nodes) {
- struct stable_node *stable_node, *next;
+ struct ksm_stable_node *stable_node, *next;
struct page *page;
list_for_each_entry_safe(stable_node, next,
@@ -2275,28 +2267,31 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
root_unstable_tree[nid] = RB_ROOT;
spin_lock(&ksm_mmlist_lock);
- slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
- ksm_scan.mm_slot = slot;
+ slot = list_entry(mm_slot->slot.mm_node.next,
+ struct mm_slot, mm_node);
+ mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
+ ksm_scan.mm_slot = mm_slot;
spin_unlock(&ksm_mmlist_lock);
/*
* Although we tested list_empty() above, a racing __ksm_exit
* of the last mm on the list may have removed it since then.
*/
- if (slot == &ksm_mm_head)
+ if (mm_slot == &ksm_mm_head)
return NULL;
next_mm:
ksm_scan.address = 0;
- ksm_scan.rmap_list = &slot->rmap_list;
+ ksm_scan.rmap_list = &mm_slot->rmap_list;
}
+ slot = &mm_slot->slot;
mm = slot->mm;
+ vma_iter_init(&vmi, mm, ksm_scan.address);
+
mmap_read_lock(mm);
if (ksm_test_exit(mm))
- vma = NULL;
- else
- vma = find_vma(mm, ksm_scan.address);
+ goto no_vmas;
- for (; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
if (!(vma->vm_flags & VM_MERGEABLE))
continue;
if (ksm_scan.address < vma->vm_start)
@@ -2308,15 +2303,17 @@ next_mm:
if (ksm_test_exit(mm))
break;
*page = follow_page(vma, ksm_scan.address, FOLL_GET);
- if (IS_ERR_OR_NULL(*page) || is_zone_device_page(*page)) {
+ if (IS_ERR_OR_NULL(*page)) {
ksm_scan.address += PAGE_SIZE;
cond_resched();
continue;
}
+ if (is_zone_device_page(*page))
+ goto next_page;
if (PageAnon(*page)) {
flush_anon_page(vma, *page, ksm_scan.address);
flush_dcache_page(*page);
- rmap_item = get_next_rmap_item(slot,
+ rmap_item = get_next_rmap_item(mm_slot,
ksm_scan.rmap_list, ksm_scan.address);
if (rmap_item) {
ksm_scan.rmap_list =
@@ -2327,6 +2324,7 @@ next_mm:
mmap_read_unlock(mm);
return rmap_item;
}
+next_page:
put_page(*page);
ksm_scan.address += PAGE_SIZE;
cond_resched();
@@ -2334,8 +2332,9 @@ next_mm:
}
if (ksm_test_exit(mm)) {
+no_vmas:
ksm_scan.address = 0;
- ksm_scan.rmap_list = &slot->rmap_list;
+ ksm_scan.rmap_list = &mm_slot->rmap_list;
}
/*
* Nuke all the rmap_items that are above this current rmap:
@@ -2344,8 +2343,9 @@ next_mm:
remove_trailing_rmap_items(ksm_scan.rmap_list);
spin_lock(&ksm_mmlist_lock);
- ksm_scan.mm_slot = list_entry(slot->mm_list.next,
- struct mm_slot, mm_list);
+ slot = list_entry(mm_slot->slot.mm_node.next,
+ struct mm_slot, mm_node);
+ ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
if (ksm_scan.address == 0) {
/*
* We've completed a full scan of all vmas, holding mmap_lock
@@ -2356,11 +2356,11 @@ next_mm:
* or when all VM_MERGEABLE areas have been unmapped (and
* mmap_lock then protects against race with MADV_MERGEABLE).
*/
- hash_del(&slot->link);
- list_del(&slot->mm_list);
+ hash_del(&mm_slot->slot.hash);
+ list_del(&mm_slot->slot.mm_node);
spin_unlock(&ksm_mmlist_lock);
- free_mm_slot(slot);
+ mm_slot_free(mm_slot_cache, mm_slot);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
mmap_read_unlock(mm);
mmdrop(mm);
@@ -2377,8 +2377,8 @@ next_mm:
}
/* Repeat until we've completed scanning the whole list */
- slot = ksm_scan.mm_slot;
- if (slot != &ksm_mm_head)
+ mm_slot = ksm_scan.mm_slot;
+ if (mm_slot != &ksm_mm_head)
goto next_mm;
ksm_scan.seqnr++;
@@ -2391,7 +2391,7 @@ next_mm:
*/
static void ksm_do_scan(unsigned int scan_npages)
{
- struct rmap_item *rmap_item;
+ struct ksm_rmap_item *rmap_item;
struct page *page;
while (scan_npages-- && likely(!freezing(current))) {
@@ -2406,7 +2406,7 @@ static void ksm_do_scan(unsigned int scan_npages)
static int ksmd_should_run(void)
{
- return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
+ return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node);
}
static int ksm_scan_thread(void *nothing)
@@ -2495,18 +2495,21 @@ EXPORT_SYMBOL_GPL(ksm_madvise);
int __ksm_enter(struct mm_struct *mm)
{
- struct mm_slot *mm_slot;
+ struct ksm_mm_slot *mm_slot;
+ struct mm_slot *slot;
int needs_wakeup;
- mm_slot = alloc_mm_slot();
+ mm_slot = mm_slot_alloc(mm_slot_cache);
if (!mm_slot)
return -ENOMEM;
+ slot = &mm_slot->slot;
+
/* Check ksm_run too? Would need tighter locking */
- needs_wakeup = list_empty(&ksm_mm_head.mm_list);
+ needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node);
spin_lock(&ksm_mmlist_lock);
- insert_to_mm_slots_hash(mm, mm_slot);
+ mm_slot_insert(mm_slots_hash, mm, slot);
/*
* When KSM_RUN_MERGE (or KSM_RUN_STOP),
* insert just behind the scanning cursor, to let the area settle
@@ -2518,9 +2521,9 @@ int __ksm_enter(struct mm_struct *mm)
* missed: then we might as well insert at the end of the list.
*/
if (ksm_run & KSM_RUN_UNMERGE)
- list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
+ list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node);
else
- list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
+ list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
spin_unlock(&ksm_mmlist_lock);
set_bit(MMF_VM_MERGEABLE, &mm->flags);
@@ -2534,7 +2537,8 @@ int __ksm_enter(struct mm_struct *mm)
void __ksm_exit(struct mm_struct *mm)
{
- struct mm_slot *mm_slot;
+ struct ksm_mm_slot *mm_slot;
+ struct mm_slot *slot;
int easy_to_free = 0;
/*
@@ -2547,21 +2551,22 @@ void __ksm_exit(struct mm_struct *mm)
*/
spin_lock(&ksm_mmlist_lock);
- mm_slot = get_mm_slot(mm);
+ slot = mm_slot_lookup(mm_slots_hash, mm);
+ mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
if (mm_slot && ksm_scan.mm_slot != mm_slot) {
if (!mm_slot->rmap_list) {
- hash_del(&mm_slot->link);
- list_del(&mm_slot->mm_list);
+ hash_del(&slot->hash);
+ list_del(&slot->mm_node);
easy_to_free = 1;
} else {
- list_move(&mm_slot->mm_list,
- &ksm_scan.mm_slot->mm_list);
+ list_move(&slot->mm_node,
+ &ksm_scan.mm_slot->slot.mm_node);
}
}
spin_unlock(&ksm_mmlist_lock);
if (easy_to_free) {
- free_mm_slot(mm_slot);
+ mm_slot_free(mm_slot_cache, mm_slot);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
mmdrop(mm);
} else if (mm_slot) {
@@ -2612,8 +2617,8 @@ struct page *ksm_might_need_to_copy(struct page *page,
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
{
- struct stable_node *stable_node;
- struct rmap_item *rmap_item;
+ struct ksm_stable_node *stable_node;
+ struct ksm_rmap_item *rmap_item;
int search_new_forks = 0;
VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);
@@ -2683,7 +2688,7 @@ again:
#ifdef CONFIG_MIGRATION
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
{
- struct stable_node *stable_node;
+ struct ksm_stable_node *stable_node;
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
@@ -2716,7 +2721,7 @@ static void wait_while_offlining(void)
}
}
-static bool stable_node_dup_remove_range(struct stable_node *stable_node,
+static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
unsigned long start_pfn,
unsigned long end_pfn)
{
@@ -2732,12 +2737,12 @@ static bool stable_node_dup_remove_range(struct stable_node *stable_node,
return false;
}
-static bool stable_node_chain_remove_range(struct stable_node *stable_node,
+static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node,
unsigned long start_pfn,
unsigned long end_pfn,
struct rb_root *root)
{
- struct stable_node *dup;
+ struct ksm_stable_node *dup;
struct hlist_node *hlist_safe;
if (!is_stable_node_chain(stable_node)) {
@@ -2761,14 +2766,14 @@ static bool stable_node_chain_remove_range(struct stable_node *stable_node,
static void ksm_check_stable_tree(unsigned long start_pfn,
unsigned long end_pfn)
{
- struct stable_node *stable_node, *next;
+ struct ksm_stable_node *stable_node, *next;
struct rb_node *node;
int nid;
for (nid = 0; nid < ksm_nr_node_ids; nid++) {
node = rb_first(root_stable_tree + nid);
while (node) {
- stable_node = rb_entry(node, struct stable_node, node);
+ stable_node = rb_entry(node, struct ksm_stable_node, node);
if (stable_node_chain_remove_range(stable_node,
start_pfn, end_pfn,
root_stable_tree +
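
The ksm.c hunks above are largely the mm_slot rework: struct ksm_mm_slot embeds a generic struct mm_slot, and mm_slot_entry() recovers the containing structure from the generic pointer. A minimal, self-contained sketch of that embed-and-recover pattern (struct layouts simplified; mm_slot_entry() is assumed to be a container_of() wrapper as in mm/mm_slot.h):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* mm_slot_entry() is assumed to be a container_of() wrapper. */
#define mm_slot_entry(ptr, type, member) container_of(ptr, type, member)

/* Simplified stand-ins for the generic and KSM-private slot types. */
struct mm_slot {
	void *mm;
};

struct ksm_mm_slot {
	struct mm_slot slot;	/* embedded generic slot */
	void *rmap_list;
};

int main(void)
{
	struct ksm_mm_slot ksm_slot = { .slot = { .mm = (void *)0x1 } };
	struct mm_slot *slot = &ksm_slot.slot;

	/* Recover the containing ksm_mm_slot from the generic pointer. */
	struct ksm_mm_slot *back = mm_slot_entry(slot, struct ksm_mm_slot, slot);

	printf("round trip ok: %d\n", back == &ksm_slot);
	return 0;
}

The same round trip is what scan_get_next_rmap_item() does above: it walks the generic mm_node list and converts each entry back into a ksm_mm_slot.
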
diff --git a/mm/madvise.c b/mm/madvise.c
index 9ff51650f4f0..2baa93ca2310 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -59,6 +59,7 @@ static int madvise_need_mmap_write(int behavior)
case MADV_FREE:
case MADV_POPULATE_READ:
case MADV_POPULATE_WRITE:
+ case MADV_COLLAPSE:
return 0;
default:
/* be safe, default to 1. list exceptions explicitly */
@@ -600,6 +601,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
struct vm_area_struct *vma = walk->vma;
spinlock_t *ptl;
pte_t *orig_pte, *pte, ptent;
+ struct folio *folio;
struct page *page;
int nr_swap = 0;
unsigned long next;
@@ -644,56 +646,56 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
page = vm_normal_page(vma, addr, ptent);
if (!page || is_zone_device_page(page))
continue;
+ folio = page_folio(page);
/*
- * If pmd isn't transhuge but the page is THP and
+ * If pmd isn't transhuge but the folio is large and
* is owned by only this process, split it and
* deactivate all pages.
*/
- if (PageTransCompound(page)) {
- if (page_mapcount(page) != 1)
+ if (folio_test_large(folio)) {
+ if (folio_mapcount(folio) != 1)
goto out;
- get_page(page);
- if (!trylock_page(page)) {
- put_page(page);
+ folio_get(folio);
+ if (!folio_trylock(folio)) {
+ folio_put(folio);
goto out;
}
pte_unmap_unlock(orig_pte, ptl);
- if (split_huge_page(page)) {
- unlock_page(page);
- put_page(page);
+ if (split_folio(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
goto out;
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte--;
addr -= PAGE_SIZE;
continue;
}
- VM_BUG_ON_PAGE(PageTransCompound(page), page);
-
- if (PageSwapCache(page) || PageDirty(page)) {
- if (!trylock_page(page))
+ if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
+ if (!folio_trylock(folio))
continue;
/*
- * If page is shared with others, we couldn't clear
- * PG_dirty of the page.
+ * If folio is shared with others, we mustn't clear
+ * the folio's dirty flag.
*/
- if (page_mapcount(page) != 1) {
- unlock_page(page);
+ if (folio_mapcount(folio) != 1) {
+ folio_unlock(folio);
continue;
}
- if (PageSwapCache(page) && !try_to_free_swap(page)) {
- unlock_page(page);
+ if (folio_test_swapcache(folio) &&
+ !folio_free_swap(folio)) {
+ folio_unlock(folio);
continue;
}
- ClearPageDirty(page);
- unlock_page(page);
+ folio_clear_dirty(folio);
+ folio_unlock(folio);
}
if (pte_young(ptent) || pte_dirty(ptent)) {
@@ -711,7 +713,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
set_pte_at(mm, addr, pte, ptent);
tlb_remove_tlb_entry(tlb, pte, addr);
}
- mark_page_lazyfree(page);
+ mark_page_lazyfree(&folio->page);
}
out:
if (nr_swap) {
@@ -1060,6 +1062,8 @@ static int madvise_vma_behavior(struct vm_area_struct *vma,
if (error)
goto out;
break;
+ case MADV_COLLAPSE:
+ return madvise_collapse(vma, prev, start, end);
}
anon_name = anon_vma_name(vma);
@@ -1153,6 +1157,7 @@ madvise_behavior_valid(int behavior)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
case MADV_HUGEPAGE:
case MADV_NOHUGEPAGE:
+ case MADV_COLLAPSE:
#endif
case MADV_DONTDUMP:
case MADV_DODUMP:
@@ -1169,13 +1174,13 @@ madvise_behavior_valid(int behavior)
}
}
-static bool
-process_madvise_behavior_valid(int behavior)
+static bool process_madvise_behavior_valid(int behavior)
{
switch (behavior) {
case MADV_COLD:
case MADV_PAGEOUT:
case MADV_WILLNEED:
+ case MADV_COLLAPSE:
return true;
default:
return false;
@@ -1241,7 +1246,7 @@ int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
if (start >= end)
break;
if (prev)
- vma = prev->vm_next;
+ vma = find_vma(mm, prev->vm_end);
else /* madvise_remove dropped mmap_lock */
vma = find_vma(mm, start);
}
@@ -1342,6 +1347,7 @@ int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
* MADV_NOHUGEPAGE - mark the given range as not worth being backed by
* transparent huge pages so the existing pages will not be
* coalesced into THP and new pages will not be allocated as THP.
+ * MADV_COLLAPSE - synchronously coalesce pages into new THP.
* MADV_DONTDUMP - the application wants to prevent pages in the given range
* from being included in its core dump.
* MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
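
With the hunks above, MADV_COLLAPSE is accepted by madvise() and process_madvise() and only needs mmap_lock for read. A rough userspace sketch of requesting a synchronous collapse (the MADV_COLLAPSE value is an assumption taken from this series' uapi headers; a kernel without the feature simply rejects it):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE 25	/* assumption: value from this series' uapi headers */
#endif

int main(void)
{
	size_t pmd = 2UL << 20;			/* 2MiB, one PMD on x86-64 */
	size_t len = 2 * pmd;
	char *p, *aligned;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Work on a PMD-aligned 2MiB window inside the mapping. */
	aligned = (char *)(((unsigned long)p + pmd - 1) & ~(pmd - 1));
	memset(aligned, 0, pmd);		/* fault in base pages first */

	/* Ask the kernel to collapse the range into a THP right now. */
	if (madvise(aligned, pmd, MADV_COLLAPSE))
		perror("madvise(MADV_COLLAPSE)");

	return 0;
}
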
diff --git a/mm/memblock.c b/mm/memblock.c
index b5d3026979fc..511d4783dcf1 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -2000,7 +2000,7 @@ static void __init free_unused_memmap(void)
* presume that there are no holes in the memory map inside
* a pageblock
*/
- start = round_down(start, pageblock_nr_pages);
+ start = pageblock_start_pfn(start);
/*
* If we had a previous bank, and there is a space
@@ -2014,12 +2014,12 @@ static void __init free_unused_memmap(void)
* presume that there are no holes in the memory map inside
* a pageblock
*/
- prev_end = ALIGN(end, pageblock_nr_pages);
+ prev_end = pageblock_align(end);
}
#ifdef CONFIG_SPARSEMEM
if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
- prev_end = ALIGN(end, pageblock_nr_pages);
+ prev_end = pageblock_align(end);
free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
}
#endif
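
The memblock.c hunks swap open-coded round_down()/ALIGN() calls for the pageblock helpers. Assuming those helpers keep the semantics of the expressions they replace, they behave like the illustrative versions below (the real definitions live in include/linux/pageblock-flags.h and the pageblock size is configuration dependent):

#include <stdio.h>

/*
 * Illustrative equivalents of the helpers adopted above. The real ones
 * operate on PFNs and derive the block size from the kernel configuration;
 * 512 pages (2MiB with 4KiB pages) is only an example value.
 */
#define PAGEBLOCK_NR_PAGES	512UL
#define pageblock_start_pfn(pfn)	((pfn) & ~(PAGEBLOCK_NR_PAGES - 1))
#define pageblock_align(pfn)	(((pfn) + PAGEBLOCK_NR_PAGES - 1) & ~(PAGEBLOCK_NR_PAGES - 1))

int main(void)
{
	unsigned long pfn = 1000;

	/* start of the containing block and the next block boundary */
	printf("start=%lu aligned=%lu\n",
	       pageblock_start_pfn(pfn), pageblock_align(pfn));	/* 512 1024 */
	return 0;
}
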
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bac2de4b9c42..2d8549ae1b30 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -88,13 +88,6 @@ static bool cgroup_memory_nosocket __ro_after_init;
/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem __ro_after_init;
-/* Whether the swap controller is active */
-#ifdef CONFIG_MEMCG_SWAP
-static bool cgroup_memory_noswap __ro_after_init;
-#else
-#define cgroup_memory_noswap 1
-#endif
-
#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif
@@ -102,7 +95,7 @@ static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
- return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
+ return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
}
#define THRESHOLDS_EVENTS_TARGET 128
@@ -662,6 +655,81 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
}
+/* Subset of vm_event_item to report for memcg event stats */
+static const unsigned int memcg_vm_event_stat[] = {
+ PGPGIN,
+ PGPGOUT,
+ PGSCAN_KSWAPD,
+ PGSCAN_DIRECT,
+ PGSTEAL_KSWAPD,
+ PGSTEAL_DIRECT,
+ PGFAULT,
+ PGMAJFAULT,
+ PGREFILL,
+ PGACTIVATE,
+ PGDEACTIVATE,
+ PGLAZYFREE,
+ PGLAZYFREED,
+#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
+ ZSWPIN,
+ ZSWPOUT,
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ THP_FAULT_ALLOC,
+ THP_COLLAPSE_ALLOC,
+#endif
+};
+
+#define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
+static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
+
+static void init_memcg_events(void)
+{
+ int i;
+
+ for (i = 0; i < NR_MEMCG_EVENTS; ++i)
+ mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
+}
+
+static inline int memcg_events_index(enum vm_event_item idx)
+{
+ return mem_cgroup_events_index[idx] - 1;
+}
+
+struct memcg_vmstats_percpu {
+ /* Local (CPU and cgroup) page state & events */
+ long state[MEMCG_NR_STAT];
+ unsigned long events[NR_MEMCG_EVENTS];
+
+ /* Delta calculation for lockless upward propagation */
+ long state_prev[MEMCG_NR_STAT];
+ unsigned long events_prev[NR_MEMCG_EVENTS];
+
+ /* Cgroup1: threshold notifications & softlimit tree updates */
+ unsigned long nr_page_events;
+ unsigned long targets[MEM_CGROUP_NTARGETS];
+};
+
+struct memcg_vmstats {
+ /* Aggregated (CPU and subtree) page state & events */
+ long state[MEMCG_NR_STAT];
+ unsigned long events[NR_MEMCG_EVENTS];
+
+ /* Pending child counts during tree propagation */
+ long state_pending[MEMCG_NR_STAT];
+ unsigned long events_pending[NR_MEMCG_EVENTS];
+};
+
+unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
+{
+ long x = READ_ONCE(memcg->vmstats->state[idx]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
+}
+
/**
* __mod_memcg_state - update cgroup memory statistics
* @memcg: the memory cgroup
@@ -809,27 +877,37 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
unsigned long count)
{
- if (mem_cgroup_disabled())
+ int index = memcg_events_index(idx);
+
+ if (mem_cgroup_disabled() || index < 0)
return;
memcg_stats_lock();
- __this_cpu_add(memcg->vmstats_percpu->events[idx], count);
+ __this_cpu_add(memcg->vmstats_percpu->events[index], count);
memcg_rstat_updated(memcg, count);
memcg_stats_unlock();
}
static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
{
- return READ_ONCE(memcg->vmstats.events[event]);
+ int index = memcg_events_index(event);
+
+ if (index < 0)
+ return 0;
+ return READ_ONCE(memcg->vmstats->events[index]);
}
static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
{
long x = 0;
int cpu;
+ int index = memcg_events_index(event);
+
+ if (index < 0)
+ return 0;
for_each_possible_cpu(cpu)
- x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
+ x += per_cpu(memcg->vmstats_percpu->events[index], cpu);
return x;
}
@@ -1136,7 +1214,7 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
} while ((memcg = parent_mem_cgroup(memcg)));
/*
- * When cgruop1 non-hierarchy mode is used,
+ * When cgroup1 non-hierarchy mode is used,
* parent_mem_cgroup() does not walk all the way up to the
* cgroup root (root_mem_cgroup). So we have to handle
* dead_memcg from cgroup root separately.
@@ -1461,29 +1539,6 @@ static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
}
-/* Subset of vm_event_item to report for memcg event stats */
-static const unsigned int memcg_vm_event_stat[] = {
- PGSCAN_KSWAPD,
- PGSCAN_DIRECT,
- PGSTEAL_KSWAPD,
- PGSTEAL_DIRECT,
- PGFAULT,
- PGMAJFAULT,
- PGREFILL,
- PGACTIVATE,
- PGDEACTIVATE,
- PGLAZYFREE,
- PGLAZYFREED,
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
- ZSWPIN,
- ZSWPOUT,
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- THP_FAULT_ALLOC,
- THP_COLLAPSE_ALLOC,
-#endif
-};
-
static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
{
struct seq_buf s;
@@ -1524,10 +1579,15 @@ static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize)
memcg_events(memcg, PGSTEAL_KSWAPD) +
memcg_events(memcg, PGSTEAL_DIRECT));
- for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++)
+ for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
+ if (memcg_vm_event_stat[i] == PGPGIN ||
+ memcg_vm_event_stat[i] == PGPGOUT)
+ continue;
+
seq_buf_printf(&s, "%s %lu\n",
vm_event_name(memcg_vm_event_stat[i]),
memcg_events(memcg, memcg_vm_event_stat[i]));
+ }
/* The above should easily fit into one page */
WARN_ON_ONCE(seq_buf_has_overflowed(&s));
@@ -1601,17 +1661,17 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
unsigned long max = READ_ONCE(memcg->memory.max);
- if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
- if (mem_cgroup_swappiness(memcg))
- max += min(READ_ONCE(memcg->swap.max),
- (unsigned long)total_swap_pages);
- } else { /* v1 */
+ if (do_memsw_account()) {
if (mem_cgroup_swappiness(memcg)) {
/* Calculate swap excess capacity from memsw limit */
unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
max += min(swap, (unsigned long)total_swap_pages);
}
+ } else {
+ if (mem_cgroup_swappiness(memcg))
+ max += min(READ_ONCE(memcg->swap.max),
+ (unsigned long)total_swap_pages);
}
return max;
}
@@ -2783,6 +2843,7 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
* - LRU isolation
* - lock_page_memcg()
* - exclusive reference
+ * - mem_cgroup_trylock_pages()
*/
folio->memcg_data = (unsigned long)memcg;
}
@@ -3356,7 +3417,7 @@ void split_page_memcg(struct page *head, unsigned int nr)
css_get_many(&memcg->css, nr - 1);
}
-#ifdef CONFIG_MEMCG_SWAP
+#ifdef CONFIG_SWAP
/**
* mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
* @entry: swap entry to be moved
@@ -3969,6 +4030,8 @@ static const unsigned int memcg1_stats[] = {
NR_FILE_MAPPED,
NR_FILE_DIRTY,
NR_WRITEBACK,
+ WORKINGSET_REFAULT_ANON,
+ WORKINGSET_REFAULT_FILE,
MEMCG_SWAP,
};
@@ -3982,6 +4045,8 @@ static const char *const memcg1_stat_names[] = {
"mapped_file",
"dirty",
"writeback",
+ "workingset_refault_anon",
+ "workingset_refault_file",
"swap",
};
@@ -4010,7 +4075,8 @@ static int memcg_stat_show(struct seq_file *m, void *v)
if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
continue;
nr = memcg_page_state_local(memcg, memcg1_stats[i]);
- seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
+ seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
+ nr * memcg_page_state_unit(memcg1_stats[i]));
}
for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
@@ -4041,7 +4107,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
continue;
nr = memcg_page_state(memcg, memcg1_stats[i]);
seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
- (u64)nr * PAGE_SIZE);
+ (u64)nr * memcg_page_state_unit(memcg1_stats[i]));
}
for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
@@ -5158,12 +5224,14 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
for_each_node(node)
free_mem_cgroup_per_node_info(memcg, node);
+ kfree(memcg->vmstats);
free_percpu(memcg->vmstats_percpu);
kfree(memcg);
}
static void mem_cgroup_free(struct mem_cgroup *memcg)
{
+ lru_gen_exit_memcg(memcg);
memcg_wb_domain_exit(memcg);
__mem_cgroup_free(memcg);
}
@@ -5186,6 +5254,10 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
goto fail;
}
+ memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
+ if (!memcg->vmstats)
+ goto fail;
+
memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
GFP_KERNEL_ACCOUNT);
if (!memcg->vmstats_percpu)
@@ -5222,6 +5294,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
memcg->deferred_split_queue.split_queue_len = 0;
#endif
idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
+ lru_gen_init_memcg(memcg);
return memcg;
fail:
mem_cgroup_id_remove(memcg);
@@ -5256,6 +5329,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
page_counter_init(&memcg->kmem, &parent->kmem);
page_counter_init(&memcg->tcpmem, &parent->tcpmem);
} else {
+ init_memcg_events();
page_counter_init(&memcg->memory, NULL);
page_counter_init(&memcg->swap, NULL);
page_counter_init(&memcg->kmem, NULL);
@@ -5404,9 +5478,9 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
* below us. We're in a per-cpu loop here and this is
* a global counter, so the first cycle will get them.
*/
- delta = memcg->vmstats.state_pending[i];
+ delta = memcg->vmstats->state_pending[i];
if (delta)
- memcg->vmstats.state_pending[i] = 0;
+ memcg->vmstats->state_pending[i] = 0;
/* Add CPU changes on this level since the last flush */
v = READ_ONCE(statc->state[i]);
@@ -5419,15 +5493,15 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
continue;
/* Aggregate counts on this level and propagate upwards */
- memcg->vmstats.state[i] += delta;
+ memcg->vmstats->state[i] += delta;
if (parent)
- parent->vmstats.state_pending[i] += delta;
+ parent->vmstats->state_pending[i] += delta;
}
- for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
- delta = memcg->vmstats.events_pending[i];
+ for (i = 0; i < NR_MEMCG_EVENTS; i++) {
+ delta = memcg->vmstats->events_pending[i];
if (delta)
- memcg->vmstats.events_pending[i] = 0;
+ memcg->vmstats->events_pending[i] = 0;
v = READ_ONCE(statc->events[i]);
if (v != statc->events_prev[i]) {
@@ -5438,9 +5512,9 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
if (!delta)
continue;
- memcg->vmstats.events[i] += delta;
+ memcg->vmstats->events[i] += delta;
if (parent)
- parent->vmstats.events_pending[i] += delta;
+ parent->vmstats->events_pending[i] += delta;
}
for_each_node_state(nid, N_MEMORY) {
@@ -5555,7 +5629,7 @@ static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
return NULL;
/*
- * Because lookup_swap_cache() updates some statistics counter,
+ * Because swap_cache_get_folio() updates some statistics counter,
* we call find_get_page() with swapper_space directly.
*/
page = find_get_page(swap_address_space(ent), swp_offset(ent));
@@ -5865,7 +5939,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
unsigned long precharge;
mmap_read_lock(mm);
- walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
+ walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
mmap_read_unlock(mm);
precharge = mc.precharge;
@@ -6163,9 +6237,7 @@ retry:
* When we have consumed all precharges and failed in doing
* additional charge, the page walk just aborts.
*/
- walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
- NULL);
-
+ walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
mmap_read_unlock(mc.mm);
atomic_dec(&mc.from->moving_account);
}
@@ -6190,6 +6262,30 @@ static void mem_cgroup_move_task(void)
}
#endif
+#ifdef CONFIG_LRU_GEN
+static void mem_cgroup_attach(struct cgroup_taskset *tset)
+{
+ struct task_struct *task;
+ struct cgroup_subsys_state *css;
+
+ /* find the first leader if there is any */
+ cgroup_taskset_for_each_leader(task, css, tset)
+ break;
+
+ if (!task)
+ return;
+
+ task_lock(task);
+ if (task->mm && READ_ONCE(task->mm->owner) == task)
+ lru_gen_migrate_mm(task->mm);
+ task_unlock(task);
+}
+#else
+static void mem_cgroup_attach(struct cgroup_taskset *tset)
+{
+}
+#endif /* CONFIG_LRU_GEN */
+
static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
{
if (value == PAGE_COUNTER_MAX)
@@ -6595,6 +6691,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
.css_reset = mem_cgroup_css_reset,
.css_rstat_flush = mem_cgroup_css_rstat_flush,
.can_attach = mem_cgroup_can_attach,
+ .attach = mem_cgroup_attach,
.cancel_attach = mem_cgroup_cancel_attach,
.post_attach = mem_cgroup_move_task,
.dfl_cftypes = memory_files,
@@ -6807,21 +6904,20 @@ int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
}
/**
- * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
- * @page: page to charge
+ * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
+ * @folio: folio to charge.
* @mm: mm context of the victim
* @gfp: reclaim mode
- * @entry: swap entry for which the page is allocated
+ * @entry: swap entry for which the folio is allocated
*
- * This function charges a page allocated for swapin. Please call this before
- * adding the page to the swapcache.
+ * This function charges a folio allocated for swapin. Please call this before
+ * adding the folio to the swapcache.
*
* Returns 0 on success. Otherwise, an error code is returned.
*/
-int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
+int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry)
{
- struct folio *folio = page_folio(page);
struct mem_cgroup *memcg;
unsigned short id;
int ret;
@@ -7194,7 +7290,7 @@ static int __init mem_cgroup_init(void)
}
subsys_initcall(mem_cgroup_init);
-#ifdef CONFIG_MEMCG_SWAP
+#ifdef CONFIG_SWAP
static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
while (!refcount_inc_not_zero(&memcg->id.ref)) {
@@ -7232,7 +7328,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
if (mem_cgroup_disabled())
return;
- if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ if (!do_memsw_account())
return;
memcg = folio_memcg(folio);
@@ -7261,7 +7357,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
if (!mem_cgroup_is_root(memcg))
page_counter_uncharge(&memcg->memory, nr_entries);
- if (!cgroup_memory_noswap && memcg != swap_memcg) {
+ if (memcg != swap_memcg) {
if (!mem_cgroup_is_root(swap_memcg))
page_counter_charge(&swap_memcg->memsw, nr_entries);
page_counter_uncharge(&memcg->memsw, nr_entries);
@@ -7297,7 +7393,7 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
struct mem_cgroup *memcg;
unsigned short oldid;
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ if (do_memsw_account())
return 0;
memcg = folio_memcg(folio);
@@ -7313,7 +7409,7 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
memcg = mem_cgroup_id_get_online(memcg);
- if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
+ if (!mem_cgroup_is_root(memcg) &&
!page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
memcg_memory_event(memcg, MEMCG_SWAP_MAX);
memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
@@ -7341,15 +7437,18 @@ void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
struct mem_cgroup *memcg;
unsigned short id;
+ if (mem_cgroup_disabled())
+ return;
+
id = swap_cgroup_record(entry, 0, nr_pages);
rcu_read_lock();
memcg = mem_cgroup_from_id(id);
if (memcg) {
- if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
- if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
- page_counter_uncharge(&memcg->swap, nr_pages);
- else
+ if (!mem_cgroup_is_root(memcg)) {
+ if (do_memsw_account())
page_counter_uncharge(&memcg->memsw, nr_pages);
+ else
+ page_counter_uncharge(&memcg->swap, nr_pages);
}
mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
mem_cgroup_id_put_many(memcg, nr_pages);
@@ -7361,7 +7460,7 @@ long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
long nr_swap_pages = get_nr_swap_pages();
- if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ if (mem_cgroup_disabled() || do_memsw_account())
return nr_swap_pages;
for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
nr_swap_pages = min_t(long, nr_swap_pages,
@@ -7370,18 +7469,18 @@ long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
return nr_swap_pages;
}
-bool mem_cgroup_swap_full(struct page *page)
+bool mem_cgroup_swap_full(struct folio *folio)
{
struct mem_cgroup *memcg;
- VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
if (vm_swap_full())
return true;
- if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
+ if (do_memsw_account())
return false;
- memcg = page_memcg(page);
+ memcg = folio_memcg(folio);
if (!memcg)
return false;
@@ -7398,10 +7497,9 @@ bool mem_cgroup_swap_full(struct page *page)
static int __init setup_swap_account(char *s)
{
- if (!strcmp(s, "1"))
- cgroup_memory_noswap = false;
- else if (!strcmp(s, "0"))
- cgroup_memory_noswap = true;
+ pr_warn_once("The swapaccount= commandline option is deprecated. "
+ "Please report your usecase to linux-mm@kvack.org if you "
+ "depend on this functionality.\n");
return 1;
}
__setup("swapaccount=", setup_swap_account);
@@ -7670,20 +7768,9 @@ static struct cftype zswap_files[] = {
};
#endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
-/*
- * If mem_cgroup_swap_init() is implemented as a subsys_initcall()
- * instead of a core_initcall(), this could mean cgroup_memory_noswap still
- * remains set to false even when memcg is disabled via "cgroup_disable=memory"
- * boot parameter. This may result in premature OOPS inside
- * mem_cgroup_get_nr_swap_pages() function in corner cases.
- */
static int __init mem_cgroup_swap_init(void)
{
- /* No memory control -> no swap control */
if (mem_cgroup_disabled())
- cgroup_memory_noswap = true;
-
- if (cgroup_memory_noswap)
return 0;
WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
@@ -7693,6 +7780,6 @@ static int __init mem_cgroup_swap_init(void)
#endif
return 0;
}
-core_initcall(mem_cgroup_swap_init);
+subsys_initcall(mem_cgroup_swap_init);
-#endif /* CONFIG_MEMCG_SWAP */
+#endif /* CONFIG_SWAP */
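
The memcontrol.c changes above shrink per-memcg event storage to the memcg_vm_event_stat subset by translating vm_event_item values through mem_cgroup_events_index[], which stores index + 1 so that a zero entry means "not tracked" and memcg_events_index() can return -1 for it. A standalone sketch of that sentinel scheme (hypothetical event names, not the kernel enums):

#include <stdio.h>

/* Hypothetical event ids standing in for the kernel's vm_event_item enum. */
enum vm_event { PGPGIN, PGPGOUT, PGFAULT, PGMAJFAULT, NR_VM_EVENTS };

/* Subset we actually account, like memcg_vm_event_stat[] above. */
static const enum vm_event tracked[] = { PGFAULT, PGMAJFAULT };

/* Zero-initialized: 0 means "not tracked", otherwise dense index + 1. */
static int event_index[NR_VM_EVENTS];

static void init_events(void)
{
	for (unsigned int i = 0; i < sizeof(tracked) / sizeof(tracked[0]); i++)
		event_index[tracked[i]] = i + 1;
}

static int events_index(enum vm_event e)
{
	return event_index[e] - 1;	/* -1 for untracked events */
}

int main(void)
{
	init_events();
	printf("PGFAULT -> %d, PGPGIN -> %d\n",
	       events_index(PGFAULT), events_index(PGPGIN));	/* 0, -1 */
	return 0;
}
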
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e7ac570dda75..145bb561ddb3 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -277,7 +277,7 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
* to SIG_IGN, but hopefully no one will do that?
*/
ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
- addr_lsb, t); /* synchronous? */
+ addr_lsb, t);
if (ret < 0)
pr_info("Error sending signal to %s:%d: %d\n",
t->comm, t->pid, ret);
@@ -413,7 +413,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
{
struct to_kill *tk, *next;
- list_for_each_entry_safe (tk, next, to_kill, nd) {
+ list_for_each_entry_safe(tk, next, to_kill, nd) {
if (forcekill) {
/*
* In case something went wrong with munmapping
@@ -437,6 +437,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
pr_err("%#lx: Cannot send advisory machine check signal to %s:%d\n",
pfn, tk->tsk->comm, tk->tsk->pid);
}
+ list_del(&tk->nd);
put_task_struct(tk->tsk);
kfree(tk);
}
@@ -520,15 +521,15 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
anon_vma_interval_tree_foreach(vmac, &av->rb_root,
pgoff, pgoff) {
vma = vmac->vma;
+ if (vma->vm_mm != t->mm)
+ continue;
if (!page_mapped_in_vma(page, vma))
continue;
- if (vma->vm_mm == t->mm)
- add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
- to_kill);
+ add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma, to_kill);
}
}
read_unlock(&tasklist_lock);
- page_unlock_anon_vma_read(av);
+ anon_vma_unlock_read(av);
}
/*
@@ -634,7 +635,7 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
swp_entry_t swp = pte_to_swp_entry(pte);
if (is_hwpoison_entry(swp))
- pfn = hwpoison_entry_to_pfn(swp);
+ pfn = swp_offset_pfn(swp);
}
if (!pfn || pfn != poisoned_pfn)
@@ -1248,9 +1249,9 @@ static int __get_hwpoison_page(struct page *page, unsigned long flags)
return ret;
/*
- * This check prevents from calling get_hwpoison_unless_zero()
- * for any unsupported type of page in order to reduce the risk of
- * unexpected races caused by taking a page refcount.
+ * This check prevents from calling get_page_unless_zero() for any
+ * unsupported type of page in order to reduce the risk of unexpected
+ * races caused by taking a page refcount.
*/
if (!HWPoisonHandlable(head, flags))
return -EBUSY;
@@ -1401,14 +1402,14 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
struct address_space *mapping;
LIST_HEAD(tokill);
bool unmap_success;
- int kill = 1, forcekill;
+ int forcekill;
bool mlocked = PageMlocked(hpage);
/*
* Here we are interested only in user-mapped pages, so skip any
* other types of pages.
*/
- if (PageReserved(p) || PageSlab(p))
+ if (PageReserved(p) || PageSlab(p) || PageTable(p))
return true;
if (!(PageLRU(hpage) || PageHuge(p)))
return true;
@@ -1442,7 +1443,6 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
if (page_mkclean(hpage)) {
SetPageDirty(hpage);
} else {
- kill = 0;
ttu |= TTU_IGNORE_HWPOISON;
pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
pfn);
@@ -1453,12 +1453,8 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
* First collect all the processes that have the page
* mapped in dirty form. This has to be done before try_to_unmap,
* because ttu takes the rmap data structures down.
- *
- * Error handling: We ignore errors here because
- * there's nothing that can be done.
*/
- if (kill)
- collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
+ collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
if (PageHuge(hpage) && !PageAnon(hpage)) {
/*
@@ -1500,7 +1496,8 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
* use a more force-full uncatchable kill to prevent
* any accesses to the poisoned memory.
*/
- forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL);
+ forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) ||
+ !unmap_success;
kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
return unmap_success;
@@ -1529,20 +1526,18 @@ static int identify_page_state(unsigned long pfn, struct page *p,
return page_action(ps, p, pfn);
}
-static int try_to_split_thp_page(struct page *page, const char *msg)
+static int try_to_split_thp_page(struct page *page)
{
+ int ret;
+
lock_page(page);
- if (unlikely(split_huge_page(page))) {
- unsigned long pfn = page_to_pfn(page);
+ ret = split_huge_page(page);
+ unlock_page(page);
- unlock_page(page);
- pr_info("%s: %#lx: thp split failed\n", msg, pfn);
+ if (unlikely(ret))
put_page(page);
- return -EBUSY;
- }
- unlock_page(page);
- return 0;
+ return ret;
}
static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
@@ -1867,8 +1862,10 @@ retry:
if (hwpoison_filter(p)) {
hugetlb_clear_page_hwpoison(head);
- res = -EOPNOTSUPP;
- goto out;
+ unlock_page(head);
+ if (res == 1)
+ put_page(head);
+ return -EOPNOTSUPP;
}
/*
@@ -2031,7 +2028,7 @@ try_again:
/*
* We need/can do nothing about count=0 pages.
* 1) it's a free page, and therefore in safe hand:
- * prep_new_page() will be the gate keeper.
+ * check_new_page() will be the gate keeper.
* 2) it's part of a non-compound high order page.
* Implies some kernel user: cannot stop them from
* R/W the page; let's pray that the page has been
@@ -2084,7 +2081,7 @@ try_again:
* page is a valid handlable page.
*/
SetPageHasHWPoisoned(hpage);
- if (try_to_split_thp_page(p, "Memory Failure") < 0) {
+ if (try_to_split_thp_page(p) < 0) {
action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
res = -EBUSY;
goto unlock_mutex;
@@ -2134,7 +2131,7 @@ try_again:
page_flags = p->flags;
if (hwpoison_filter(p)) {
- TestClearPageHWPoison(p);
+ ClearPageHWPoison(p);
unlock_page(p);
put_page(p);
res = -EOPNOTSUPP;
@@ -2359,7 +2356,7 @@ int unpoison_memory(unsigned long pfn)
goto unlock_mutex;
}
- if (PageSlab(page) || PageTable(page))
+ if (PageSlab(page) || PageTable(page) || PageReserved(page))
goto unlock_mutex;
ret = get_hwpoison_page(p, MF_UNPOISON);
@@ -2383,13 +2380,14 @@ int unpoison_memory(unsigned long pfn)
count = free_raw_hwp_pages(page, false);
if (count == 0) {
ret = -EBUSY;
+ put_page(page);
goto unlock_mutex;
}
}
freeit = !!TestClearPageHWPoison(p);
put_page(page);
- if (freeit && !(pfn == my_zero_pfn(0) && page_count(p) == 1)) {
+ if (freeit) {
put_page(page);
ret = 0;
}
@@ -2409,24 +2407,26 @@ EXPORT_SYMBOL(unpoison_memory);
static bool isolate_page(struct page *page, struct list_head *pagelist)
{
bool isolated = false;
- bool lru = PageLRU(page);
if (PageHuge(page)) {
isolated = !isolate_hugetlb(page, pagelist);
} else {
+ bool lru = !__PageMovable(page);
+
if (lru)
isolated = !isolate_lru_page(page);
else
- isolated = !isolate_movable_page(page, ISOLATE_UNEVICTABLE);
+ isolated = !isolate_movable_page(page,
+ ISOLATE_UNEVICTABLE);
- if (isolated)
+ if (isolated) {
list_add(&page->lru, pagelist);
+ if (lru)
+ inc_node_page_state(page, NR_ISOLATED_ANON +
+ page_is_file_lru(page));
+ }
}
- if (isolated && lru)
- inc_node_page_state(page, NR_ISOLATED_ANON +
- page_is_file_lru(page));
-
/*
* If we succeed to isolate the page, we grabbed another refcount on
* the page, so we can safely drop the one we got from get_any_pages().
@@ -2439,11 +2439,11 @@ static bool isolate_page(struct page *page, struct list_head *pagelist)
}
/*
- * __soft_offline_page handles hugetlb-pages and non-hugetlb pages.
+ * soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
* If the page is a non-dirty unmapped page-cache page, it simply invalidates.
* If the page is mapped, it migrates the contents over.
*/
-static int __soft_offline_page(struct page *page)
+static int soft_offline_in_use_page(struct page *page)
{
long ret = 0;
unsigned long pfn = page_to_pfn(page);
@@ -2456,6 +2456,14 @@ static int __soft_offline_page(struct page *page)
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
};
+ if (!huge && PageTransHuge(hpage)) {
+ if (try_to_split_thp_page(page)) {
+ pr_info("soft offline: %#lx: thp split failed\n", pfn);
+ return -EBUSY;
+ }
+ hpage = page;
+ }
+
lock_page(page);
if (!PageHuge(page))
wait_on_page_writeback(page);
@@ -2505,26 +2513,6 @@ static int __soft_offline_page(struct page *page)
return ret;
}
-static int soft_offline_in_use_page(struct page *page)
-{
- struct page *hpage = compound_head(page);
-
- if (!PageHuge(page) && PageTransHuge(hpage))
- if (try_to_split_thp_page(page, "soft offline") < 0)
- return -EBUSY;
- return __soft_offline_page(page);
-}
-
-static int soft_offline_free_page(struct page *page)
-{
- int rc = 0;
-
- if (!page_handle_poison(page, true, false))
- rc = -EBUSY;
-
- return rc;
-}
-
static void put_ref_page(struct page *page)
{
if (page)
@@ -2592,8 +2580,6 @@ retry:
if (hwpoison_filter(page)) {
if (ret > 0)
put_page(page);
- else
- put_ref_page(ref_page);
mutex_unlock(&mf_mutex);
return -EOPNOTSUPP;
@@ -2602,7 +2588,7 @@ retry:
if (ret > 0) {
ret = soft_offline_in_use_page(page);
} else if (ret == 0) {
- if (soft_offline_free_page(page) && try_again) {
+ if (!page_handle_poison(page, true, false) && try_again) {
try_again = false;
flags &= ~MF_COUNT_INCREASED;
goto retry;
@@ -2616,7 +2602,7 @@ retry:
void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
- int i;
+ int i, total = 0;
/*
* A further optimization is to have per section refcounted
@@ -2629,8 +2615,10 @@ void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
for (i = 0; i < nr_pages; i++) {
if (PageHWPoison(&memmap[i])) {
- num_poisoned_pages_dec();
+ total++;
ClearPageHWPoison(&memmap[i]);
}
}
+ if (total)
+ num_poisoned_pages_sub(total);
}
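
The final memory-failure.c hunk batches the poisoned-page accounting: pages are counted locally while their flags are cleared, and the shared counter is adjusted once with num_poisoned_pages_sub(). The same pattern in miniature (hypothetical counter and flag array, not the kernel symbols):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long poisoned_pages = 3;	/* pretend 3 pages are currently poisoned */

static void clear_poisoned(bool *flags, int nr)
{
	long total = 0;

	for (int i = 0; i < nr; i++) {
		if (flags[i]) {
			total++;	/* count locally ... */
			flags[i] = false;
		}
	}
	if (total)			/* ... touch the shared counter once */
		atomic_fetch_sub(&poisoned_pages, total);
}

int main(void)
{
	bool flags[8] = { true, false, true, true };

	clear_poisoned(flags, 8);
	printf("poisoned now %ld\n", atomic_load(&poisoned_pages));	/* 0 */
	return 0;
}
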
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
new file mode 100644
index 000000000000..f116b7b6333e
--- /dev/null
+++ b/mm/memory-tiers.c
@@ -0,0 +1,732 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/slab.h>
+#include <linux/lockdep.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/memory.h>
+#include <linux/memory-tiers.h>
+
+#include "internal.h"
+
+struct memory_tier {
+ /* hierarchy of memory tiers */
+ struct list_head list;
+ /* list of all memory types part of this tier */
+ struct list_head memory_types;
+ /*
+	 * Start value of abstract distance. A memory tier maps
+	 * an abstract distance range,
+ * adistance_start .. adistance_start + MEMTIER_CHUNK_SIZE
+ */
+ int adistance_start;
+ struct device dev;
+ /* All the nodes that are part of all the lower memory tiers. */
+ nodemask_t lower_tier_mask;
+};
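
As an aside on the structure above: find_create_memory_tier() further down rounds a memory type's abstract distance down to a MEMTIER_CHUNK_SIZE boundary, so every adistance inside one chunk lands in the same tier, and the sysfs device id is that start value shifted right by MEMTIER_CHUNK_BITS. A small illustration with hypothetical constants (the real values are in include/linux/memory-tiers.h):

#include <stdio.h>

/* Hypothetical stand-ins for the constants in include/linux/memory-tiers.h. */
#define MEMTIER_CHUNK_BITS	8
#define MEMTIER_CHUNK_SIZE	(1 << MEMTIER_CHUNK_BITS)

/* Every abstract distance inside one chunk maps to the same tier. */
static int tier_start(int adistance)
{
	return adistance & ~(MEMTIER_CHUNK_SIZE - 1);
}

int main(void)
{
	int adistance = 4 * MEMTIER_CHUNK_SIZE + 5;	/* some DRAM-like device */

	printf("adistance_start=%d dev_id=%d\n",
	       tier_start(adistance),				/* 1024 */
	       tier_start(adistance) >> MEMTIER_CHUNK_BITS);	/* 4 -> memory_tier4 */
	return 0;
}
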
+
+struct demotion_nodes {
+ nodemask_t preferred;
+};
+
+struct node_memory_type_map {
+ struct memory_dev_type *memtype;
+ int map_count;
+};
+
+static DEFINE_MUTEX(memory_tier_lock);
+static LIST_HEAD(memory_tiers);
+static struct node_memory_type_map node_memory_types[MAX_NUMNODES];
+static struct memory_dev_type *default_dram_type;
+
+static struct bus_type memory_tier_subsys = {
+ .name = "memory_tiering",
+ .dev_name = "memory_tier",
+};
+
+#ifdef CONFIG_MIGRATION
+static int top_tier_adistance;
+/*
+ * node_demotion[] examples:
+ *
+ * Example 1:
+ *
+ * Node 0 & 1 are CPU + DRAM nodes, node 2 & 3 are PMEM nodes.
+ *
+ * node distances:
+ * node 0 1 2 3
+ * 0 10 20 30 40
+ * 1 20 10 40 30
+ * 2 30 40 10 40
+ * 3 40 30 40 10
+ *
+ * memory_tiers0 = 0-1
+ * memory_tiers1 = 2-3
+ *
+ * node_demotion[0].preferred = 2
+ * node_demotion[1].preferred = 3
+ * node_demotion[2].preferred = <empty>
+ * node_demotion[3].preferred = <empty>
+ *
+ * Example 2:
+ *
+ * Node 0 & 1 are CPU + DRAM nodes, node 2 is memory-only DRAM node.
+ *
+ * node distances:
+ * node 0 1 2
+ * 0 10 20 30
+ * 1 20 10 30
+ * 2 30 30 10
+ *
+ * memory_tiers0 = 0-2
+ *
+ * node_demotion[0].preferred = <empty>
+ * node_demotion[1].preferred = <empty>
+ * node_demotion[2].preferred = <empty>
+ *
+ * Example 3:
+ *
+ * Node 0 is a CPU + DRAM node, node 1 is an HBM node, node 2 is a PMEM node.
+ *
+ * node distances:
+ * node 0 1 2
+ * 0 10 20 30
+ * 1 20 10 40
+ * 2 30 40 10
+ *
+ * memory_tiers0 = 1
+ * memory_tiers1 = 0
+ * memory_tiers2 = 2
+ *
+ * node_demotion[0].preferred = 2
+ * node_demotion[1].preferred = 0
+ * node_demotion[2].preferred = <empty>
+ *
+ */
+static struct demotion_nodes *node_demotion __read_mostly;
+#endif /* CONFIG_MIGRATION */
+
+static inline struct memory_tier *to_memory_tier(struct device *device)
+{
+ return container_of(device, struct memory_tier, dev);
+}
+
+static __always_inline nodemask_t get_memtier_nodemask(struct memory_tier *memtier)
+{
+ nodemask_t nodes = NODE_MASK_NONE;
+ struct memory_dev_type *memtype;
+
+ list_for_each_entry(memtype, &memtier->memory_types, tier_sibiling)
+ nodes_or(nodes, nodes, memtype->nodes);
+
+ return nodes;
+}
+
+static void memory_tier_device_release(struct device *dev)
+{
+ struct memory_tier *tier = to_memory_tier(dev);
+ /*
+	 * The synchronize_rcu() in clear_node_memory_tier() makes sure
+	 * we don't have RCU access to this memory tier.
+ */
+ kfree(tier);
+}
+
+static ssize_t nodes_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ nodemask_t nmask;
+
+ mutex_lock(&memory_tier_lock);
+ nmask = get_memtier_nodemask(to_memory_tier(dev));
+ ret = sysfs_emit(buf, "%*pbl\n", nodemask_pr_args(&nmask));
+ mutex_unlock(&memory_tier_lock);
+ return ret;
+}
+static DEVICE_ATTR_RO(nodes);
+
+static struct attribute *memtier_dev_attrs[] = {
+ &dev_attr_nodes.attr,
+ NULL
+};
+
+static const struct attribute_group memtier_dev_group = {
+ .attrs = memtier_dev_attrs,
+};
+
+static const struct attribute_group *memtier_dev_groups[] = {
+ &memtier_dev_group,
+ NULL
+};
+
+static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memtype)
+{
+ int ret;
+ bool found_slot = false;
+ struct memory_tier *memtier, *new_memtier;
+ int adistance = memtype->adistance;
+ unsigned int memtier_adistance_chunk_size = MEMTIER_CHUNK_SIZE;
+
+ lockdep_assert_held_once(&memory_tier_lock);
+
+ adistance = round_down(adistance, memtier_adistance_chunk_size);
+ /*
+ * If the memtype is already part of a memory tier,
+ * just return that.
+ */
+ if (!list_empty(&memtype->tier_sibiling)) {
+ list_for_each_entry(memtier, &memory_tiers, list) {
+ if (adistance == memtier->adistance_start)
+ return memtier;
+ }
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+ }
+
+ list_for_each_entry(memtier, &memory_tiers, list) {
+ if (adistance == memtier->adistance_start) {
+ goto link_memtype;
+ } else if (adistance < memtier->adistance_start) {
+ found_slot = true;
+ break;
+ }
+ }
+
+ new_memtier = kzalloc(sizeof(struct memory_tier), GFP_KERNEL);
+ if (!new_memtier)
+ return ERR_PTR(-ENOMEM);
+
+ new_memtier->adistance_start = adistance;
+ INIT_LIST_HEAD(&new_memtier->list);
+ INIT_LIST_HEAD(&new_memtier->memory_types);
+ if (found_slot)
+ list_add_tail(&new_memtier->list, &memtier->list);
+ else
+ list_add_tail(&new_memtier->list, &memory_tiers);
+
+ new_memtier->dev.id = adistance >> MEMTIER_CHUNK_BITS;
+ new_memtier->dev.bus = &memory_tier_subsys;
+ new_memtier->dev.release = memory_tier_device_release;
+ new_memtier->dev.groups = memtier_dev_groups;
+
+ ret = device_register(&new_memtier->dev);
+ if (ret) {
+		list_del(&new_memtier->list);
+		put_device(&new_memtier->dev);
+ return ERR_PTR(ret);
+ }
+ memtier = new_memtier;
+
+link_memtype:
+ list_add(&memtype->tier_sibiling, &memtier->memory_types);
+ return memtier;
+}
+
+static struct memory_tier *__node_get_memory_tier(int node)
+{
+ pg_data_t *pgdat;
+
+ pgdat = NODE_DATA(node);
+ if (!pgdat)
+ return NULL;
+ /*
+ * Since we hold memory_tier_lock, we can avoid
+ * RCU read locks when accessing the details. No
+ * parallel updates are possible here.
+ */
+ return rcu_dereference_check(pgdat->memtier,
+ lockdep_is_held(&memory_tier_lock));
+}
+
+#ifdef CONFIG_MIGRATION
+bool node_is_toptier(int node)
+{
+ bool toptier;
+ pg_data_t *pgdat;
+ struct memory_tier *memtier;
+
+ pgdat = NODE_DATA(node);
+ if (!pgdat)
+ return false;
+
+ rcu_read_lock();
+ memtier = rcu_dereference(pgdat->memtier);
+ if (!memtier) {
+ toptier = true;
+ goto out;
+ }
+ if (memtier->adistance_start <= top_tier_adistance)
+ toptier = true;
+ else
+ toptier = false;
+out:
+ rcu_read_unlock();
+ return toptier;
+}
+
+void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
+{
+ struct memory_tier *memtier;
+
+ /*
+	 * pg_data_t.memtier updates include a synchronize_rcu()
+	 * which ensures that we either find NULL or a valid memtier
+	 * in NODE_DATA. Protect the access with rcu_read_lock().
+ */
+ rcu_read_lock();
+ memtier = rcu_dereference(pgdat->memtier);
+ if (memtier)
+ *targets = memtier->lower_tier_mask;
+ else
+ *targets = NODE_MASK_NONE;
+ rcu_read_unlock();
+}
+
+/**
+ * next_demotion_node() - Get the next node in the demotion path
+ * @node: The starting node to lookup the next node
+ *
+ * Return: node id for next memory node in the demotion path hierarchy
+ * from @node; NUMA_NO_NODE if @node is terminal. This does not keep
+ * @node online or guarantee that it *continues* to be the next demotion
+ * target.
+ */
+int next_demotion_node(int node)
+{
+ struct demotion_nodes *nd;
+ int target;
+
+ if (!node_demotion)
+ return NUMA_NO_NODE;
+
+ nd = &node_demotion[node];
+
+ /*
+ * node_demotion[] is updated without excluding this
+ * function from running.
+ *
+ * Make sure to use RCU over entire code blocks if
+ * node_demotion[] reads need to be consistent.
+ */
+ rcu_read_lock();
+ /*
+ * If there are multiple target nodes, just select one
+ * target node randomly.
+ *
+	 * In addition, we could use round-robin to select the
+	 * target node, but that would need another variable in
+	 * node_demotion[] to record the last selected target node,
+	 * which may cause cache ping-pong due to the changing of
+	 * the last target node. Introducing per-cpu data to avoid
+	 * that caching issue seems more complicated. So selecting
+	 * a target node randomly seems better for now.
+ */
+ target = node_random(&nd->preferred);
+ rcu_read_unlock();
+
+ return target;
+}
+
+static void disable_all_demotion_targets(void)
+{
+ struct memory_tier *memtier;
+ int node;
+
+ for_each_node_state(node, N_MEMORY) {
+ node_demotion[node].preferred = NODE_MASK_NONE;
+ /*
+		 * We are holding memory_tier_lock, so it is safe
+		 * to access pgdat->memtier.
+ */
+ memtier = __node_get_memory_tier(node);
+ if (memtier)
+ memtier->lower_tier_mask = NODE_MASK_NONE;
+ }
+ /*
+ * Ensure that the "disable" is visible across the system.
+ * Readers will see either a combination of before+disable
+ * state or disable+after. They will never see before and
+ * after state together.
+ */
+ synchronize_rcu();
+}
+
+/*
+ * Find an automatic demotion target for all memory
+ * nodes. Failing here is OK. It might just indicate
+ * being at the end of a chain.
+ */
+static void establish_demotion_targets(void)
+{
+ struct memory_tier *memtier;
+ struct demotion_nodes *nd;
+ int target = NUMA_NO_NODE, node;
+ int distance, best_distance;
+ nodemask_t tier_nodes, lower_tier;
+
+ lockdep_assert_held_once(&memory_tier_lock);
+
+ if (!node_demotion || !IS_ENABLED(CONFIG_MIGRATION))
+ return;
+
+ disable_all_demotion_targets();
+
+ for_each_node_state(node, N_MEMORY) {
+ best_distance = -1;
+ nd = &node_demotion[node];
+
+ memtier = __node_get_memory_tier(node);
+ if (!memtier || list_is_last(&memtier->list, &memory_tiers))
+ continue;
+ /*
+ * Get the lower memtier to find the demotion node list.
+ */
+ memtier = list_next_entry(memtier, list);
+ tier_nodes = get_memtier_nodemask(memtier);
+ /*
+		 * find_next_best_node() uses the 'used' nodemask as a skip list.
+		 * Add all memory nodes except the selected memory tier's
+		 * nodelist to the skip list so that we find the best node from
+		 * the memtier nodelist.
+ */
+ nodes_andnot(tier_nodes, node_states[N_MEMORY], tier_nodes);
+
+ /*
+		 * Find all the nodes in the memory tier node list with the same
+		 * best distance and add them to the preferred mask. We randomly
+		 * select between nodes in the preferred mask when allocating
+		 * pages during demotion.
+ */
+ do {
+ target = find_next_best_node(node, &tier_nodes);
+ if (target == NUMA_NO_NODE)
+ break;
+
+ distance = node_distance(node, target);
+ if (distance == best_distance || best_distance == -1) {
+ best_distance = distance;
+ node_set(target, nd->preferred);
+ } else {
+ break;
+ }
+ } while (1);
+ }
+ /*
+ * Promotion is allowed from a memory tier to higher
+ * memory tier only if the memory tier doesn't include
+	 * compute. We want to skip promotion from a memory tier
+	 * if any node that is part of that memory tier has CPUs.
+	 * Once we detect such a memory tier, we consider that tier
+	 * as the top tier from which promotion is not allowed.
+ */
+ list_for_each_entry_reverse(memtier, &memory_tiers, list) {
+ tier_nodes = get_memtier_nodemask(memtier);
+ nodes_and(tier_nodes, node_states[N_CPU], tier_nodes);
+ if (!nodes_empty(tier_nodes)) {
+ /*
+ * abstract distance below the max value of this memtier
+ * is considered toptier.
+ */
+ top_tier_adistance = memtier->adistance_start +
+ MEMTIER_CHUNK_SIZE - 1;
+ break;
+ }
+ }
+ /*
+	 * Now build the lower_tier mask for each node, collecting the node mask
+	 * from all memory tiers below it. This allows us to fall back demotion
+	 * page allocation to a set of nodes that is closer to the above selected
+	 * preferred node.
+ */
+ lower_tier = node_states[N_MEMORY];
+ list_for_each_entry(memtier, &memory_tiers, list) {
+ /*
+		 * Keep removing the current tier from the lower_tier nodes;
+		 * this removes all nodes in the current and higher
+		 * memory tiers from the lower_tier mask.
+ */
+ tier_nodes = get_memtier_nodemask(memtier);
+ nodes_andnot(lower_tier, lower_tier, tier_nodes);
+ memtier->lower_tier_mask = lower_tier;
+ }
+}
+
+#else
+static inline void disable_all_demotion_targets(void) {}
+static inline void establish_demotion_targets(void) {}
+#endif /* CONFIG_MIGRATION */
+
+static inline void __init_node_memory_type(int node, struct memory_dev_type *memtype)
+{
+ if (!node_memory_types[node].memtype)
+ node_memory_types[node].memtype = memtype;
+ /*
+	 * For each device getting added in the same NUMA node
+	 * with this specific memtype, bump the map count. We
+	 * only take the memtype device reference once, so that
+	 * changing a node's memtype can be done by dropping the
+	 * only reference count taken here.
+ */
+
+ if (node_memory_types[node].memtype == memtype) {
+ if (!node_memory_types[node].map_count++)
+ kref_get(&memtype->kref);
+ }
+}
+
+static struct memory_tier *set_node_memory_tier(int node)
+{
+ struct memory_tier *memtier;
+ struct memory_dev_type *memtype;
+ pg_data_t *pgdat = NODE_DATA(node);
+
+ lockdep_assert_held_once(&memory_tier_lock);
+
+ if (!node_state(node, N_MEMORY))
+ return ERR_PTR(-EINVAL);
+
+ __init_node_memory_type(node, default_dram_type);
+
+ memtype = node_memory_types[node].memtype;
+ node_set(node, memtype->nodes);
+ memtier = find_create_memory_tier(memtype);
+ if (!IS_ERR(memtier))
+ rcu_assign_pointer(pgdat->memtier, memtier);
+ return memtier;
+}
+
+static void destroy_memory_tier(struct memory_tier *memtier)
+{
+ list_del(&memtier->list);
+ device_unregister(&memtier->dev);
+}
+
+static bool clear_node_memory_tier(int node)
+{
+ bool cleared = false;
+ pg_data_t *pgdat;
+ struct memory_tier *memtier;
+
+ pgdat = NODE_DATA(node);
+ if (!pgdat)
+ return false;
+
+ /*
+ * Make sure that anybody looking at NODE_DATA who finds
+ * a valid memtier finds memory_dev_types with nodes still
+ * linked to the memtier. We achieve this by waiting for
+ * the RCU read section to finish using synchronize_rcu().
+ * This also enables us to free the destroyed memory tier
+ * with kfree() instead of kfree_rcu().
+ */
+ memtier = __node_get_memory_tier(node);
+ if (memtier) {
+ struct memory_dev_type *memtype;
+
+ rcu_assign_pointer(pgdat->memtier, NULL);
+ synchronize_rcu();
+ memtype = node_memory_types[node].memtype;
+ node_clear(node, memtype->nodes);
+ if (nodes_empty(memtype->nodes)) {
+ list_del_init(&memtype->tier_sibiling);
+ if (list_empty(&memtier->memory_types))
+ destroy_memory_tier(memtier);
+ }
+ cleared = true;
+ }
+ return cleared;
+}
+
+static void release_memtype(struct kref *kref)
+{
+ struct memory_dev_type *memtype;
+
+ memtype = container_of(kref, struct memory_dev_type, kref);
+ kfree(memtype);
+}
+
+struct memory_dev_type *alloc_memory_type(int adistance)
+{
+ struct memory_dev_type *memtype;
+
+ memtype = kmalloc(sizeof(*memtype), GFP_KERNEL);
+ if (!memtype)
+ return ERR_PTR(-ENOMEM);
+
+ memtype->adistance = adistance;
+ INIT_LIST_HEAD(&memtype->tier_sibiling);
+ memtype->nodes = NODE_MASK_NONE;
+ kref_init(&memtype->kref);
+ return memtype;
+}
+EXPORT_SYMBOL_GPL(alloc_memory_type);
+
+void destroy_memory_type(struct memory_dev_type *memtype)
+{
+ kref_put(&memtype->kref, release_memtype);
+}
+EXPORT_SYMBOL_GPL(destroy_memory_type);
+
+void init_node_memory_type(int node, struct memory_dev_type *memtype)
+{
+ mutex_lock(&memory_tier_lock);
+ __init_node_memory_type(node, memtype);
+ mutex_unlock(&memory_tier_lock);
+}
+EXPORT_SYMBOL_GPL(init_node_memory_type);
+
+void clear_node_memory_type(int node, struct memory_dev_type *memtype)
+{
+ mutex_lock(&memory_tier_lock);
+ if (node_memory_types[node].memtype == memtype)
+ node_memory_types[node].map_count--;
+ /*
+ * If we unmapped all the devices attached to this node,
+ * clear the node memory type.
+ */
+ if (!node_memory_types[node].map_count) {
+ node_memory_types[node].memtype = NULL;
+ kref_put(&memtype->kref, release_memtype);
+ }
+ mutex_unlock(&memory_tier_lock);
+}
+EXPORT_SYMBOL_GPL(clear_node_memory_type);
+
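Taken together, alloc_memory_type(), init_node_memory_type(), clear_node_memory_type() and destroy_memory_type() form the driver-facing half of this interface: allocate a type once, attach it to each node the device backs, and tear both down on removal. A hedged sketch of that pairing follows; the slowmem_* names and the abstract-distance offset are made up for illustration, and error paths are trimmed.

static struct memory_dev_type *slowmem_type;

static int slowmem_attach(int nid)
{
	/* Illustrative adistance: one chunk slower than default DRAM. */
	slowmem_type = alloc_memory_type(MEMTIER_ADISTANCE_DRAM + MEMTIER_CHUNK_SIZE);
	if (IS_ERR(slowmem_type))
		return PTR_ERR(slowmem_type);

	/* Record the type before the node's memory is onlined. */
	init_node_memory_type(nid, slowmem_type);
	return 0;
}

static void slowmem_detach(int nid)
{
	/* Drop the per-node mapping first, then the allocation reference. */
	clear_node_memory_type(nid, slowmem_type);
	destroy_memory_type(slowmem_type);
}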
+static int __meminit memtier_hotplug_callback(struct notifier_block *self,
+ unsigned long action, void *_arg)
+{
+ struct memory_tier *memtier;
+ struct memory_notify *arg = _arg;
+
+ /*
+ * Only update the node migration order when a node is
+ * changing status, like online->offline.
+ */
+ if (arg->status_change_nid < 0)
+ return notifier_from_errno(0);
+
+ switch (action) {
+ case MEM_OFFLINE:
+ mutex_lock(&memory_tier_lock);
+ if (clear_node_memory_tier(arg->status_change_nid))
+ establish_demotion_targets();
+ mutex_unlock(&memory_tier_lock);
+ break;
+ case MEM_ONLINE:
+ mutex_lock(&memory_tier_lock);
+ memtier = set_node_memory_tier(arg->status_change_nid);
+ if (!IS_ERR(memtier))
+ establish_demotion_targets();
+ mutex_unlock(&memory_tier_lock);
+ break;
+ }
+
+ return notifier_from_errno(0);
+}
+
+static int __init memory_tier_init(void)
+{
+ int ret, node;
+ struct memory_tier *memtier;
+
+ ret = subsys_virtual_register(&memory_tier_subsys, NULL);
+ if (ret)
+ panic("%s() failed to register memory tier subsystem\n", __func__);
+
+#ifdef CONFIG_MIGRATION
+ node_demotion = kcalloc(nr_node_ids, sizeof(struct demotion_nodes),
+ GFP_KERNEL);
+ WARN_ON(!node_demotion);
+#endif
+ mutex_lock(&memory_tier_lock);
+ /*
+ * For now we can have 4 faster memory tiers with smaller adistance
+ * than the default DRAM tier.
+ */
+ default_dram_type = alloc_memory_type(MEMTIER_ADISTANCE_DRAM);
+ if (!default_dram_type)
+ panic("%s() failed to allocate default DRAM tier\n", __func__);
+
+ /*
+ * Look at all the existing N_MEMORY nodes and add them to
+ * the default memory tier or to a tier if we already have
+ * memory types assigned.
+ */
+ for_each_node_state(node, N_MEMORY) {
+ memtier = set_node_memory_tier(node);
+ if (IS_ERR(memtier))
+ /*
+ * Continue with the memtiers we were able to set up.
+ */
+ break;
+ }
+ establish_demotion_targets();
+ mutex_unlock(&memory_tier_lock);
+
+ hotplug_memory_notifier(memtier_hotplug_callback, MEMTIER_HOTPLUG_PRIO);
+ return 0;
+}
+subsys_initcall(memory_tier_init);
+
+bool numa_demotion_enabled = false;
+
+#ifdef CONFIG_MIGRATION
+#ifdef CONFIG_SYSFS
+static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ numa_demotion_enabled ? "true" : "false");
+}
+
+static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ ssize_t ret;
+
+ ret = kstrtobool(buf, &numa_demotion_enabled);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static struct kobj_attribute numa_demotion_enabled_attr =
+ __ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
+ numa_demotion_enabled_store);
+
+static struct attribute *numa_attrs[] = {
+ &numa_demotion_enabled_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group numa_attr_group = {
+ .attrs = numa_attrs,
+};
+
+static int __init numa_init_sysfs(void)
+{
+ int err;
+ struct kobject *numa_kobj;
+
+ numa_kobj = kobject_create_and_add("numa", mm_kobj);
+ if (!numa_kobj) {
+ pr_err("failed to create numa kobject\n");
+ return -ENOMEM;
+ }
+ err = sysfs_create_group(numa_kobj, &numa_attr_group);
+ if (err) {
+ pr_err("failed to register numa group\n");
+ goto delete_obj;
+ }
+ return 0;
+
+delete_obj:
+ kobject_put(numa_kobj);
+ return err;
+}
+subsys_initcall(numa_init_sysfs);
+#endif /* CONFIG_SYSFS */
+#endif
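Because the numa kobject is created under mm_kobj, the attribute added above shows up as /sys/kernel/mm/numa/demotion_enabled, and the store path goes through kstrtobool(), so "0"/"1" (or "y"/"n") are accepted. Below is a small userspace sketch for flipping the knob; only the sysfs path comes from the code above, the rest is ordinary boilerplate and needs root.

#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/kernel/mm/numa/demotion_enabled";
	const char *val = (argc > 1) ? argv[1] : "1";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs(val, f);		/* parsed by kstrtobool() on the kernel side */
	fclose(f);
	return 0;
}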
diff --git a/mm/memory.c b/mm/memory.c
index a78814413ac0..df678fa30cdb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -52,6 +52,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
+#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
@@ -66,6 +67,7 @@
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
+#include <linux/memory-tiers.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
#include <linux/dax.h>
@@ -74,6 +76,7 @@
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/vmalloc.h>
+#include <linux/sched/sysctl.h>
#include <trace/events/kmem.h>
@@ -125,18 +128,6 @@ int randomize_va_space __read_mostly =
2;
#endif
-#ifndef arch_faults_on_old_pte
-static inline bool arch_faults_on_old_pte(void)
-{
- /*
- * Those arches which don't have hw access flag feature need to
- * implement their own helper. By default, "true" means pagefault
- * will be hit on old pte.
- */
- return true;
-}
-#endif
-
#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
@@ -402,12 +393,21 @@ void free_pgd_range(struct mmu_gather *tlb,
} while (pgd++, addr = next, addr != end);
}
-void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long floor, unsigned long ceiling)
+void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
+ struct vm_area_struct *vma, unsigned long floor,
+ unsigned long ceiling)
{
- while (vma) {
- struct vm_area_struct *next = vma->vm_next;
+ MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
+
+ do {
unsigned long addr = vma->vm_start;
+ struct vm_area_struct *next;
+
+ /*
+ * Note: USER_PGTABLES_CEILING may be passed as ceiling and may
+ * be 0. This will underflow and is okay.
+ */
+ next = mas_find(&mas, ceiling - 1);
/*
* Hide vma from rmap and truncate_pagecache before freeing
@@ -426,7 +426,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
while (next && next->vm_start <= vma->vm_end + PMD_SIZE
&& !is_vm_hugetlb_page(next)) {
vma = next;
- next = vma->vm_next;
+ next = mas_find(&mas, ceiling - 1);
unlink_anon_vmas(vma);
unlink_file_vma(vma);
}
@@ -434,7 +434,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
floor, next ? next->vm_start : ceiling);
}
vma = next;
- }
+ } while (vma);
}
void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
@@ -1685,10 +1685,8 @@ static void unmap_single_vma(struct mmu_gather *tlb,
if (vma->vm_file) {
zap_flags_t zap_flags = details ?
details->zap_flags : 0;
- i_mmap_lock_write(vma->vm_file->f_mapping);
__unmap_hugepage_range_final(tlb, vma, start, end,
NULL, zap_flags);
- i_mmap_unlock_write(vma->vm_file->f_mapping);
}
} else
unmap_page_range(tlb, vma, start, end, details);
@@ -1698,6 +1696,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
/**
* unmap_vmas - unmap a range of memory covered by a list of vma's
* @tlb: address of the caller's struct mmu_gather
+ * @mt: the maple tree
* @vma: the starting vma
* @start_addr: virtual address at which to start unmapping
* @end_addr: virtual address at which to end unmapping
@@ -1713,7 +1712,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
* ensure that any thus-far unmapped pages are flushed before unmap_vmas()
* drops the lock and schedules.
*/
-void unmap_vmas(struct mmu_gather *tlb,
+void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr)
{
@@ -1723,12 +1722,14 @@ void unmap_vmas(struct mmu_gather *tlb,
/* Careful - we need to zap private pages too! */
.even_cows = true,
};
+ MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
start_addr, end_addr);
mmu_notifier_invalidate_range_start(&range);
- for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
+ do {
unmap_single_vma(tlb, vma, start_addr, end_addr, &details);
+ } while ((vma = mas_find(&mas, end_addr - 1)) != NULL);
mmu_notifier_invalidate_range_end(&range);
}
@@ -1743,8 +1744,11 @@ void unmap_vmas(struct mmu_gather *tlb,
void zap_page_range(struct vm_area_struct *vma, unsigned long start,
unsigned long size)
{
+ struct maple_tree *mt = &vma->vm_mm->mm_mt;
+ unsigned long end = start + size;
struct mmu_notifier_range range;
struct mmu_gather tlb;
+ MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
lru_add_drain();
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
@@ -1752,8 +1756,9 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
tlb_gather_mmu(&tlb, vma->vm_mm);
update_hiwater_rss(vma->vm_mm);
mmu_notifier_invalidate_range_start(&range);
- for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
+ do {
unmap_single_vma(&tlb, vma, start, range.end, NULL);
+ } while ((vma = mas_find(&mas, end - 1)) != NULL);
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb);
}
@@ -2870,7 +2875,7 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src,
* On architectures with software "accessed" bits, we would
* take a double page fault, so mark it accessed here.
*/
- if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
+ if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
pte_t entry;
vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
@@ -3128,6 +3133,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
delayacct_wpcopy_end();
return 0;
}
+ kmsan_copy_page_meta(new_page, old_page);
}
if (mem_cgroup_charge(page_folio(new_page), mm, GFP_KERNEL))
@@ -3362,6 +3368,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
{
const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
struct vm_area_struct *vma = vmf->vma;
+ struct folio *folio;
VM_BUG_ON(unshare && (vmf->flags & FAULT_FLAG_WRITE));
VM_BUG_ON(!unshare && !(vmf->flags & FAULT_FLAG_WRITE));
@@ -3408,48 +3415,47 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
* Take out anonymous pages first, anonymous shared vmas are
* not dirty accountable.
*/
- if (PageAnon(vmf->page)) {
- struct page *page = vmf->page;
-
+ folio = page_folio(vmf->page);
+ if (folio_test_anon(folio)) {
/*
* If the page is exclusive to this process we must reuse the
* page without further checks.
*/
- if (PageAnonExclusive(page))
+ if (PageAnonExclusive(vmf->page))
goto reuse;
/*
- * We have to verify under page lock: these early checks are
- * just an optimization to avoid locking the page and freeing
+ * We have to verify under folio lock: these early checks are
+ * just an optimization to avoid locking the folio and freeing
* the swapcache if there is little hope that we can reuse.
*
- * PageKsm() doesn't necessarily raise the page refcount.
+ * KSM doesn't necessarily raise the folio refcount.
*/
- if (PageKsm(page) || page_count(page) > 3)
+ if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
goto copy;
- if (!PageLRU(page))
+ if (!folio_test_lru(folio))
/*
* Note: We cannot easily detect+handle references from
- * remote LRU pagevecs or references to PageLRU() pages.
+ * remote LRU pagevecs or references to LRU folios.
*/
lru_add_drain();
- if (page_count(page) > 1 + PageSwapCache(page))
+ if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
goto copy;
- if (!trylock_page(page))
+ if (!folio_trylock(folio))
goto copy;
- if (PageSwapCache(page))
- try_to_free_swap(page);
- if (PageKsm(page) || page_count(page) != 1) {
- unlock_page(page);
+ if (folio_test_swapcache(folio))
+ folio_free_swap(folio);
+ if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
+ folio_unlock(folio);
goto copy;
}
/*
- * Ok, we've got the only page reference from our mapping
- * and the page is locked, it's dark out, and we're wearing
+ * Ok, we've got the only folio reference from our mapping
+ * and the folio is locked, it's dark out, and we're wearing
* sunglasses. Hit it.
*/
- page_move_anon_rmap(page, vma);
- unlock_page(page);
+ page_move_anon_rmap(vmf->page, vma);
+ folio_unlock(folio);
reuse:
if (unlikely(unshare)) {
pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -3612,11 +3618,11 @@ EXPORT_SYMBOL(unmap_mapping_range);
*/
static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
{
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct vm_area_struct *vma = vmf->vma;
struct mmu_notifier_range range;
- if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
+ if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags))
return VM_FAULT_RETRY;
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
vma->vm_mm, vmf->address & PAGE_MASK,
@@ -3626,23 +3632,23 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
&vmf->ptl);
if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
- restore_exclusive_pte(vma, page, vmf->address, vmf->pte);
+ restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
pte_unmap_unlock(vmf->pte, vmf->ptl);
- unlock_page(page);
+ folio_unlock(folio);
mmu_notifier_invalidate_range_end(&range);
return 0;
}
-static inline bool should_try_to_free_swap(struct page *page,
+static inline bool should_try_to_free_swap(struct folio *folio,
struct vm_area_struct *vma,
unsigned int fault_flags)
{
- if (!PageSwapCache(page))
+ if (!folio_test_swapcache(folio))
return false;
- if (mem_cgroup_swap_full(page) || (vma->vm_flags & VM_LOCKED) ||
- PageMlocked(page))
+ if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
+ folio_test_mlocked(folio))
return true;
/*
* If we want to map a page that's in the swapcache writable, we
@@ -3650,8 +3656,8 @@ static inline bool should_try_to_free_swap(struct page *page,
* user. Try freeing the swapcache to get rid of the swapcache
* reference only in case it's likely that we'll be the exlusive user.
*/
- return (fault_flags & FAULT_FLAG_WRITE) && !PageKsm(page) &&
- page_count(page) == 2;
+ return (fault_flags & FAULT_FLAG_WRITE) && !folio_test_ksm(folio) &&
+ folio_ref_count(folio) == 2;
}
static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
@@ -3718,7 +3724,8 @@ static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
vm_fault_t do_swap_page(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
- struct page *page = NULL, *swapcache;
+ struct folio *swapcache, *folio = NULL;
+ struct page *page;
struct swap_info_struct *si = NULL;
rmap_t rmap_flags = RMAP_NONE;
bool exclusive = false;
@@ -3760,21 +3767,25 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (unlikely(!si))
goto out;
- page = lookup_swap_cache(entry, vma, vmf->address);
- swapcache = page;
+ folio = swap_cache_get_folio(entry, vma, vmf->address);
+ if (folio)
+ page = folio_file_page(folio, swp_offset(entry));
+ swapcache = folio;
- if (!page) {
+ if (!folio) {
if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
__swap_count(entry) == 1) {
/* skip swapcache */
- page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
- vmf->address);
- if (page) {
- __SetPageLocked(page);
- __SetPageSwapBacked(page);
-
- if (mem_cgroup_swapin_charge_page(page,
- vma->vm_mm, GFP_KERNEL, entry)) {
+ folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0,
+ vma, vmf->address, false);
+ page = &folio->page;
+ if (folio) {
+ __folio_set_locked(folio);
+ __folio_set_swapbacked(folio);
+
+ if (mem_cgroup_swapin_charge_folio(folio,
+ vma->vm_mm, GFP_KERNEL,
+ entry)) {
ret = VM_FAULT_OOM;
goto out_page;
}
@@ -3782,23 +3793,24 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
shadow = get_shadow_from_swap_cache(entry);
if (shadow)
- workingset_refault(page_folio(page),
- shadow);
+ workingset_refault(folio, shadow);
- lru_cache_add(page);
+ folio_add_lru(folio);
/* To provide entry to swap_readpage() */
- set_page_private(page, entry.val);
+ folio_set_swap_entry(folio, entry);
swap_readpage(page, true, NULL);
- set_page_private(page, 0);
+ folio->private = NULL;
}
} else {
page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
vmf);
- swapcache = page;
+ if (page)
+ folio = page_folio(page);
+ swapcache = folio;
}
- if (!page) {
+ if (!folio) {
/*
* Back out if somebody else faulted in this pte
* while we released the pte lock.
@@ -3823,7 +3835,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
goto out_release;
}
- locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
+ locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags);
if (!locked) {
ret |= VM_FAULT_RETRY;
@@ -3832,13 +3844,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (swapcache) {
/*
- * Make sure try_to_free_swap or swapoff did not release the
+ * Make sure folio_free_swap() or swapoff did not release the
* swapcache from under us. The page pin, and pte_same test
* below, are not enough to exclude that. Even if it is still
* swapcache, we need to check that the page's swap has not
* changed.
*/
- if (unlikely(!PageSwapCache(page) ||
+ if (unlikely(!folio_test_swapcache(folio) ||
page_private(page) != entry.val))
goto out_page;
@@ -3850,9 +3862,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
page = ksm_might_need_to_copy(page, vma, vmf->address);
if (unlikely(!page)) {
ret = VM_FAULT_OOM;
- page = swapcache;
goto out_page;
}
+ folio = page_folio(page);
/*
* If we want to map a page that's in the swapcache writable, we
@@ -3860,8 +3872,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* owner. Try removing the extra reference from the local LRU
* pagevecs if required.
*/
- if ((vmf->flags & FAULT_FLAG_WRITE) && page == swapcache &&
- !PageKsm(page) && !PageLRU(page))
+ if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
+ !folio_test_ksm(folio) && !folio_test_lru(folio))
lru_add_drain();
}
@@ -3875,7 +3887,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
goto out_nomap;
- if (unlikely(!PageUptodate(page))) {
+ if (unlikely(!folio_test_uptodate(folio))) {
ret = VM_FAULT_SIGBUS;
goto out_nomap;
}
@@ -3888,26 +3900,26 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* check after taking the PT lock and making sure that nobody
* concurrently faulted in this page and set PG_anon_exclusive.
*/
- BUG_ON(!PageAnon(page) && PageMappedToDisk(page));
- BUG_ON(PageAnon(page) && PageAnonExclusive(page));
+ BUG_ON(!folio_test_anon(folio) && folio_test_mappedtodisk(folio));
+ BUG_ON(folio_test_anon(folio) && PageAnonExclusive(page));
/*
* Check under PT lock (to protect against concurrent fork() sharing
* the swap entry concurrently) for certainly exclusive pages.
*/
- if (!PageKsm(page)) {
+ if (!folio_test_ksm(folio)) {
/*
* Note that pte_swp_exclusive() == false for architectures
* without __HAVE_ARCH_PTE_SWP_EXCLUSIVE.
*/
exclusive = pte_swp_exclusive(vmf->orig_pte);
- if (page != swapcache) {
+ if (folio != swapcache) {
/*
* We have a fresh page that is not exposed to the
* swapcache -> certainly exclusive.
*/
exclusive = true;
- } else if (exclusive && PageWriteback(page) &&
+ } else if (exclusive && folio_test_writeback(folio) &&
data_race(si->flags & SWP_STABLE_WRITES)) {
/*
* This is tricky: not all swap backends support
@@ -3937,8 +3949,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* yet.
*/
swap_free(entry);
- if (should_try_to_free_swap(page, vma, vmf->flags))
- try_to_free_swap(page);
+ if (should_try_to_free_swap(folio, vma, vmf->flags))
+ folio_free_swap(folio);
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
@@ -3950,7 +3962,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* exposing them to the swapcache or because the swap entry indicates
* exclusivity.
*/
- if (!PageKsm(page) && (exclusive || page_count(page) == 1)) {
+ if (!folio_test_ksm(folio) &&
+ (exclusive || folio_ref_count(folio) == 1)) {
if (vmf->flags & FAULT_FLAG_WRITE) {
pte = maybe_mkwrite(pte_mkdirty(pte), vma);
vmf->flags &= ~FAULT_FLAG_WRITE;
@@ -3968,19 +3981,20 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
vmf->orig_pte = pte;
/* ksm created a completely new copy */
- if (unlikely(page != swapcache && swapcache)) {
+ if (unlikely(folio != swapcache && swapcache)) {
page_add_new_anon_rmap(page, vma, vmf->address);
- lru_cache_add_inactive_or_unevictable(page, vma);
+ folio_add_lru_vma(folio, vma);
} else {
page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
}
- VM_BUG_ON(!PageAnon(page) || (pte_write(pte) && !PageAnonExclusive(page)));
+ VM_BUG_ON(!folio_test_anon(folio) ||
+ (pte_write(pte) && !PageAnonExclusive(page)));
set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
- unlock_page(page);
- if (page != swapcache && swapcache) {
+ folio_unlock(folio);
+ if (folio != swapcache && swapcache) {
/*
* Hold the lock to avoid the swap entry to be reused
* until we take the PT lock for the pte_same() check
@@ -3989,8 +4003,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
* so that the swap count won't change under a
* parallel locked swapcache.
*/
- unlock_page(swapcache);
- put_page(swapcache);
+ folio_unlock(swapcache);
+ folio_put(swapcache);
}
if (vmf->flags & FAULT_FLAG_WRITE) {
@@ -4011,12 +4025,12 @@ out:
out_nomap:
pte_unmap_unlock(vmf->pte, vmf->ptl);
out_page:
- unlock_page(page);
+ folio_unlock(folio);
out_release:
- put_page(page);
- if (page != swapcache && swapcache) {
- unlock_page(swapcache);
- put_page(swapcache);
+ folio_put(folio);
+ if (folio != swapcache && swapcache) {
+ folio_unlock(swapcache);
+ folio_put(swapcache);
}
if (si)
put_swap_device(si);
@@ -4731,8 +4745,16 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
flags |= TNF_SHARED;
- last_cpupid = page_cpupid_last(page);
page_nid = page_to_nid(page);
+ /*
+ * For memory tiering mode, the cpupid of a slow memory page is
+ * used to record the page access time, so use the default value.
+ */
+ if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
+ !node_is_toptier(page_nid))
+ last_cpupid = (-1 & LAST_CPUPID_MASK);
+ else
+ last_cpupid = page_cpupid_last(page);
target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
&flags);
if (target_nid == NUMA_NO_NODE) {
@@ -4991,7 +5013,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
return VM_FAULT_OOM;
retry_pud:
if (pud_none(*vmf.pud) &&
- hugepage_vma_check(vma, vm_flags, false, true)) {
+ hugepage_vma_check(vma, vm_flags, false, true, true)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -5025,7 +5047,7 @@ retry_pud:
goto retry_pud;
if (pmd_none(*vmf.pmd) &&
- hugepage_vma_check(vma, vm_flags, false, true)) {
+ hugepage_vma_check(vma, vm_flags, false, true, true)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -5120,6 +5142,27 @@ static inline void mm_account_fault(struct pt_regs *regs,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}
+#ifdef CONFIG_LRU_GEN
+static void lru_gen_enter_fault(struct vm_area_struct *vma)
+{
+ /* the LRU algorithm doesn't apply to sequential or random reads */
+ current->in_lru_fault = !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ));
+}
+
+static void lru_gen_exit_fault(void)
+{
+ current->in_lru_fault = false;
+}
+#else
+static void lru_gen_enter_fault(struct vm_area_struct *vma)
+{
+}
+
+static void lru_gen_exit_fault(void)
+{
+}
+#endif /* CONFIG_LRU_GEN */
+
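lru_gen_enter_fault() keys off VM_SEQ_READ and VM_RAND_READ, which userspace sets per mapping with madvise(). Below is a minimal sketch of marking a file mapping sequential so that its faults are skipped by the multi-gen LRU accounting; the file name and length are placeholders.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 1 << 20;				/* placeholder length */
	int fd = open("/tmp/example.dat", O_RDONLY);	/* placeholder file */
	void *p;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	/* MADV_SEQUENTIAL sets VM_SEQ_READ on the VMA, so in_lru_fault stays false. */
	madvise(p, len, MADV_SEQUENTIAL);

	munmap(p, len);
	close(fd);
	return 0;
}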
/*
* By the time we get here, we already hold the mm semaphore
*
@@ -5151,11 +5194,15 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
if (flags & FAULT_FLAG_USER)
mem_cgroup_enter_user_fault();
+ lru_gen_enter_fault(vma);
+
if (unlikely(is_vm_hugetlb_page(vma)))
ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
else
ret = __handle_mm_fault(vma, address, flags);
+ lru_gen_exit_fault();
+
if (flags & FAULT_FLAG_USER) {
mem_cgroup_exit_user_fault();
/*
@@ -5643,11 +5690,11 @@ static void clear_gigantic_page(struct page *page,
unsigned int pages_per_huge_page)
{
int i;
- struct page *p = page;
+ struct page *p;
might_sleep();
- for (i = 0; i < pages_per_huge_page;
- i++, p = mem_map_next(p, page, i)) {
+ for (i = 0; i < pages_per_huge_page; i++) {
+ p = nth_page(page, i);
cond_resched();
clear_user_highpage(p, addr + i * PAGE_SIZE);
}
@@ -5683,13 +5730,12 @@ static void copy_user_gigantic_page(struct page *dst, struct page *src,
struct page *dst_base = dst;
struct page *src_base = src;
- for (i = 0; i < pages_per_huge_page; ) {
+ for (i = 0; i < pages_per_huge_page; i++) {
+ dst = nth_page(dst_base, i);
+ src = nth_page(src_base, i);
+
cond_resched();
copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
-
- i++;
- dst = mem_map_next(dst, dst_base, i);
- src = mem_map_next(src, src_base, i);
}
}
@@ -5736,10 +5782,10 @@ long copy_huge_page_from_user(struct page *dst_page,
void *page_kaddr;
unsigned long i, rc = 0;
unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
- struct page *subpage = dst_page;
+ struct page *subpage;
- for (i = 0; i < pages_per_huge_page;
- i++, subpage = mem_map_next(subpage, dst_page, i)) {
+ for (i = 0; i < pages_per_huge_page; i++) {
+ subpage = nth_page(dst_page, i);
if (allow_pagefault)
page_kaddr = kmap(subpage);
else
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index fad6d1f2262a..fd40f7e9f176 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1085,8 +1085,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
* of the physical memory space for vmemmaps. That space is pageblock
* aligned.
*/
- if (WARN_ON_ONCE(!nr_pages ||
- !IS_ALIGNED(pfn, pageblock_nr_pages) ||
+ if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) ||
!IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
return -EINVAL;
@@ -1806,8 +1805,7 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
* of the physical memory space for vmemmaps. That space is pageblock
* aligned.
*/
- if (WARN_ON_ONCE(!nr_pages ||
- !IS_ALIGNED(start_pfn, pageblock_nr_pages) ||
+ if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(start_pfn) ||
!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
return -EINVAL;
@@ -1940,8 +1938,8 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
node_states_clear_node(node, &arg);
if (arg.status_change_nid >= 0) {
- kswapd_stop(node);
kcompactd_stop(node);
+ kswapd_stop(node);
}
writeback_set_ratelimit();
@@ -1969,11 +1967,10 @@ failed_removal:
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
- int ret = !is_memblock_offlined(mem);
int *nid = arg;
*nid = mem->nid;
- if (unlikely(ret)) {
+ if (unlikely(mem->state != MEM_OFFLINE)) {
phys_addr_t beginpa, endpa;
beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b73d3248d976..a937eaec5b68 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -381,9 +381,10 @@ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
mmap_write_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next)
+ for_each_vma(vmi, vma)
mpol_rebind_policy(vma->vm_policy, new);
mmap_write_unlock(mm);
}
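This hunk shows the recurring pattern in this series: linked-list walks over vma->vm_next become a VMA_ITERATOR plus for_each_vma() (or for_each_vma_range() when an end address matters). A hedged, in-kernel-style sketch of the pattern with the loop body reduced to a counter; count_vmas_below() is illustrative and not part of the patch.

static unsigned long count_vmas_below(struct mm_struct *mm, unsigned long limit)
{
	struct vm_area_struct *vma;
	unsigned long nr = 0;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma_range(vmi, vma, limit)
		nr++;
	mmap_read_unlock(mm);

	return nr;
}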
@@ -654,7 +655,7 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
static int queue_pages_test_walk(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
- struct vm_area_struct *vma = walk->vma;
+ struct vm_area_struct *next, *vma = walk->vma;
struct queue_pages *qp = walk->private;
unsigned long endvma = vma->vm_end;
unsigned long flags = qp->flags;
@@ -669,9 +670,10 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
/* hole at head side of range */
return -EFAULT;
}
+ next = find_vma(vma->vm_mm, vma->vm_end);
if (!(flags & MPOL_MF_DISCONTIG_OK) &&
((vma->vm_end < qp->end) &&
- (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
+ (!next || vma->vm_end < next->vm_start)))
/* hole at middle or tail of range */
return -EFAULT;
@@ -785,26 +787,24 @@ static int vma_replace_policy(struct vm_area_struct *vma,
static int mbind_range(struct mm_struct *mm, unsigned long start,
unsigned long end, struct mempolicy *new_pol)
{
+ MA_STATE(mas, &mm->mm_mt, start - 1, start - 1);
struct vm_area_struct *prev;
struct vm_area_struct *vma;
int err = 0;
pgoff_t pgoff;
- unsigned long vmstart;
- unsigned long vmend;
- vma = find_vma(mm, start);
- VM_BUG_ON(!vma);
-
- prev = vma->vm_prev;
- if (start > vma->vm_start)
- prev = vma;
+ prev = mas_find_rev(&mas, 0);
+ if (prev && (start < prev->vm_end))
+ vma = prev;
+ else
+ vma = mas_next(&mas, end - 1);
- for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) {
- vmstart = max(start, vma->vm_start);
- vmend = min(end, vma->vm_end);
+ for (; vma; vma = mas_next(&mas, end - 1)) {
+ unsigned long vmstart = max(start, vma->vm_start);
+ unsigned long vmend = min(end, vma->vm_end);
if (mpol_equal(vma_policy(vma), new_pol))
- continue;
+ goto next;
pgoff = vma->vm_pgoff +
((vmstart - vma->vm_start) >> PAGE_SHIFT);
@@ -813,6 +813,8 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
new_pol, vma->vm_userfaultfd_ctx,
anon_vma_name(vma));
if (prev) {
+ /* vma_merge() invalidated the mas */
+ mas_pause(&mas);
vma = prev;
goto replace;
}
@@ -820,19 +822,25 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
err = split_vma(vma->vm_mm, vma, vmstart, 1);
if (err)
goto out;
+ /* split_vma() invalidated the mas */
+ mas_pause(&mas);
}
if (vma->vm_end != vmend) {
err = split_vma(vma->vm_mm, vma, vmend, 0);
if (err)
goto out;
+ /* split_vma() invalidated the mas */
+ mas_pause(&mas);
}
- replace:
+replace:
err = vma_replace_policy(vma, new_pol);
if (err)
goto out;
+next:
+ prev = vma;
}
- out:
+out:
return err;
}
@@ -853,12 +861,14 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
goto out;
}
+ task_lock(current);
ret = mpol_set_nodemask(new, nodes, scratch);
if (ret) {
+ task_unlock(current);
mpol_put(new);
goto out;
}
- task_lock(current);
+
old = current->mempolicy;
current->mempolicy = new;
if (new && new->mode == MPOL_INTERLEAVE)
@@ -1047,6 +1057,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
int flags)
{
nodemask_t nmask;
+ struct vm_area_struct *vma;
LIST_HEAD(pagelist);
int err = 0;
struct migration_target_control mtc = {
@@ -1062,8 +1073,9 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
* need migration. Between passing in the full user address
* space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
*/
+ vma = find_vma(mm, 0);
VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
- queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
+ queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask,
flags | MPOL_MF_DISCONTIG_OK, &pagelist);
if (!list_empty(&pagelist)) {
@@ -1193,14 +1205,13 @@ static struct page *new_page(struct page *page, unsigned long start)
struct folio *dst, *src = page_folio(page);
struct vm_area_struct *vma;
unsigned long address;
+ VMA_ITERATOR(vmi, current->mm, start);
gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
- vma = find_vma(current->mm, start);
- while (vma) {
+ for_each_vma(vmi, vma) {
address = page_address_in_vma(page, vma);
if (address != -EFAULT)
break;
- vma = vma->vm_next;
}
if (folio_test_hugetlb(src))
@@ -1259,7 +1270,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (mode == MPOL_DEFAULT)
flags &= ~MPOL_MF_STRICT;
- len = (len + PAGE_SIZE - 1) & PAGE_MASK;
+ len = PAGE_ALIGN(len);
end = start + len;
if (end < start)
@@ -1478,6 +1489,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
unsigned long vmend;
unsigned long end;
int err = -ENOENT;
+ VMA_ITERATOR(vmi, mm, start);
start = untagged_addr(start);
if (start & ~PAGE_MASK)
@@ -1495,7 +1507,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
if (home_node >= MAX_NUMNODES || !node_online(home_node))
return -EINVAL;
- len = (len + PAGE_SIZE - 1) & PAGE_MASK;
+ len = PAGE_ALIGN(len);
end = start + len;
if (end < start)
@@ -1503,9 +1515,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
if (end == start)
return 0;
mmap_write_lock(mm);
- vma = find_vma(mm, start);
- for (; vma && vma->vm_start < end; vma = vma->vm_next) {
-
+ for_each_vma_range(vmi, vma, end) {
vmstart = max(start, vma->vm_start);
vmend = min(end, vma->vm_end);
new = mpol_dup(vma_policy(vma));
@@ -1803,7 +1813,7 @@ bool vma_policy_mof(struct vm_area_struct *vma)
return pol->flags & MPOL_F_MOF;
}
-static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
+bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
{
enum zone_type dynamic_policy_zone = policy_zone;
diff --git a/mm/memremap.c b/mm/memremap.c
index 58b20c3c300b..25029a474d30 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -454,7 +454,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
/* fall back to slow path lookup */
rcu_read_lock();
pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
- if (pgmap && !percpu_ref_tryget_live(&pgmap->ref))
+ if (pgmap && !percpu_ref_tryget_live_rcu(&pgmap->ref))
pgmap = NULL;
rcu_read_unlock();
diff --git a/mm/migrate.c b/mm/migrate.c
index 6a1597c92261..c228afba0963 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -50,6 +50,7 @@
#include <linux/memory.h>
#include <linux/random.h>
#include <linux/sched/sysctl.h>
+#include <linux/memory-tiers.h>
#include <asm/tlbflush.h>
@@ -198,7 +199,7 @@ static bool remove_migration_pte(struct folio *folio,
#endif
folio_get(folio);
- pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
+ pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
if (pte_swp_soft_dirty(*pvmw.pte))
pte = pte_mksoft_dirty(pte);
@@ -206,6 +207,10 @@ static bool remove_migration_pte(struct folio *folio,
* Recheck VMA as permissions can change since migration started
*/
entry = pte_to_swp_entry(*pvmw.pte);
+ if (!is_migration_entry_young(entry))
+ pte = pte_mkold(pte);
+ if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
+ pte = pte_mkdirty(pte);
if (is_writable_migration_entry(entry))
pte = maybe_mkwrite(pte, vma);
else if (pte_swp_uffd_wp(*pvmw.pte))
@@ -560,6 +565,18 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
* future migrations of this same page.
*/
cpupid = page_cpupid_xchg_last(&folio->page, -1);
+ /*
+ * For memory tiering mode, when migrating between slow and fast
+ * memory nodes, reset cpupid, because it is used to record the
+ * page access time in the slow memory node.
+ */
+ if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
+ bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
+ bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
+
+ if (f_toptier != t_toptier)
+ cpupid = -1;
+ }
page_cpupid_xchg_last(&newfolio->page, cpupid);
folio_migrate_ksm(newfolio, folio);
@@ -976,17 +993,15 @@ out:
return rc;
}
-static int __unmap_and_move(struct page *page, struct page *newpage,
+static int __unmap_and_move(struct folio *src, struct folio *dst,
int force, enum migrate_mode mode)
{
- struct folio *folio = page_folio(page);
- struct folio *dst = page_folio(newpage);
int rc = -EAGAIN;
bool page_was_mapped = false;
struct anon_vma *anon_vma = NULL;
- bool is_lru = !__PageMovable(page);
+ bool is_lru = !__PageMovable(&src->page);
- if (!trylock_page(page)) {
+ if (!folio_trylock(src)) {
if (!force || mode == MIGRATE_ASYNC)
goto out;
@@ -1006,10 +1021,10 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
if (current->flags & PF_MEMALLOC)
goto out;
- lock_page(page);
+ folio_lock(src);
}
- if (PageWriteback(page)) {
+ if (folio_test_writeback(src)) {
/*
* Only in the case of a full synchronous migration is it
* necessary to wait for PageWriteback. In the async case,
@@ -1026,39 +1041,39 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
}
if (!force)
goto out_unlock;
- wait_on_page_writeback(page);
+ folio_wait_writeback(src);
}
/*
- * By try_to_migrate(), page->mapcount goes down to 0 here. In this case,
- * we cannot notice that anon_vma is freed while we migrates a page.
+ * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
+ * we cannot notice that anon_vma is freed while we migrate a page.
* This get_anon_vma() delays freeing anon_vma pointer until the end
* of migration. File cache pages are no problem because of page_lock()
* File Caches may use write_page() or lock_page() in migration, then,
* just care Anon page here.
*
- * Only page_get_anon_vma() understands the subtleties of
+ * Only folio_get_anon_vma() understands the subtleties of
* getting a hold on an anon_vma from outside one of its mms.
* But if we cannot get anon_vma, then we won't need it anyway,
* because that implies that the anon page is no longer mapped
* (and cannot be remapped so long as we hold the page lock).
*/
- if (PageAnon(page) && !PageKsm(page))
- anon_vma = page_get_anon_vma(page);
+ if (folio_test_anon(src) && !folio_test_ksm(src))
+ anon_vma = folio_get_anon_vma(src);
/*
* Block others from accessing the new page when we get around to
* establishing additional references. We are usually the only one
- * holding a reference to newpage at this point. We used to have a BUG
- * here if trylock_page(newpage) fails, but would like to allow for
- * cases where there might be a race with the previous use of newpage.
+ * holding a reference to dst at this point. We used to have a BUG
+ * here if folio_trylock(dst) fails, but would like to allow for
+ * cases where there might be a race with the previous use of dst.
* This is much like races on refcount of oldpage: just don't BUG().
*/
- if (unlikely(!trylock_page(newpage)))
+ if (unlikely(!folio_trylock(dst)))
goto out_unlock;
if (unlikely(!is_lru)) {
- rc = move_to_new_folio(dst, folio, mode);
+ rc = move_to_new_folio(dst, src, mode);
goto out_unlock_both;
}
@@ -1066,7 +1081,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* Corner case handling:
* 1. When a new swap-cache page is read into, it is added to the LRU
* and treated as swapcache but it has no rmap yet.
- * Calling try_to_unmap() against a page->mapping==NULL page will
+ * Calling try_to_unmap() against a src->mapping==NULL page will
* trigger a BUG. So handle it here.
* 2. An orphaned page (see truncate_cleanup_page) might have
* fs-private metadata. The page can be picked up due to memory
@@ -1074,57 +1089,56 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* invisible to the vm, so the page can not be migrated. So try to
* free the metadata, so the page can be freed.
*/
- if (!page->mapping) {
- VM_BUG_ON_PAGE(PageAnon(page), page);
- if (page_has_private(page)) {
- try_to_free_buffers(folio);
+ if (!src->mapping) {
+ if (folio_test_private(src)) {
+ try_to_free_buffers(src);
goto out_unlock_both;
}
- } else if (page_mapped(page)) {
+ } else if (folio_mapped(src)) {
/* Establish migration ptes */
- VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
- page);
- try_to_migrate(folio, 0);
+ VM_BUG_ON_FOLIO(folio_test_anon(src) &&
+ !folio_test_ksm(src) && !anon_vma, src);
+ try_to_migrate(src, 0);
page_was_mapped = true;
}
- if (!page_mapped(page))
- rc = move_to_new_folio(dst, folio, mode);
+ if (!folio_mapped(src))
+ rc = move_to_new_folio(dst, src, mode);
/*
- * When successful, push newpage to LRU immediately: so that if it
+ * When successful, push dst to LRU immediately: so that if it
* turns out to be an mlocked page, remove_migration_ptes() will
- * automatically build up the correct newpage->mlock_count for it.
+ * automatically build up the correct dst->mlock_count for it.
*
* We would like to do something similar for the old page, when
* unsuccessful, and other cases when a page has been temporarily
* isolated from the unevictable LRU: but this case is the easiest.
*/
if (rc == MIGRATEPAGE_SUCCESS) {
- lru_cache_add(newpage);
+ folio_add_lru(dst);
if (page_was_mapped)
lru_add_drain();
}
if (page_was_mapped)
- remove_migration_ptes(folio,
- rc == MIGRATEPAGE_SUCCESS ? dst : folio, false);
+ remove_migration_ptes(src,
+ rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
out_unlock_both:
- unlock_page(newpage);
+ folio_unlock(dst);
out_unlock:
/* Drop an anon_vma reference if we took one */
if (anon_vma)
put_anon_vma(anon_vma);
- unlock_page(page);
+ folio_unlock(src);
out:
/*
- * If migration is successful, decrease refcount of the newpage,
+ * If migration is successful, decrease refcount of dst,
* which will not free the page because new page owner increased
* refcounter.
*/
if (rc == MIGRATEPAGE_SUCCESS)
- put_page(newpage);
+ folio_put(dst);
return rc;
}
@@ -1140,6 +1154,7 @@ static int unmap_and_move(new_page_t get_new_page,
enum migrate_reason reason,
struct list_head *ret)
{
+ struct folio *dst, *src = page_folio(page);
int rc = MIGRATEPAGE_SUCCESS;
struct page *newpage = NULL;
@@ -1157,9 +1172,10 @@ static int unmap_and_move(new_page_t get_new_page,
newpage = get_new_page(page, private);
if (!newpage)
return -ENOMEM;
+ dst = page_folio(newpage);
newpage->private = 0;
- rc = __unmap_and_move(page, newpage, force, mode);
+ rc = __unmap_and_move(src, dst, force, mode);
if (rc == MIGRATEPAGE_SUCCESS)
set_page_owner_migrate_reason(newpage, reason);
@@ -1244,12 +1260,10 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
* tables or check whether the hugepage is pmd-based or not before
* kicking migration.
*/
- if (!hugepage_migration_supported(page_hstate(hpage))) {
- list_move_tail(&hpage->lru, ret);
+ if (!hugepage_migration_supported(page_hstate(hpage)))
return -ENOSYS;
- }
- if (page_count(hpage) == 1) {
+ if (folio_ref_count(src) == 1) {
/* page was freed from under us. So we are done. */
putback_active_hugepage(hpage);
return MIGRATEPAGE_SUCCESS;
@@ -1260,7 +1274,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
return -ENOMEM;
dst = page_folio(new_hpage);
- if (!trylock_page(hpage)) {
+ if (!folio_trylock(src)) {
if (!force)
goto out;
switch (mode) {
@@ -1270,29 +1284,29 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
default:
goto out;
}
- lock_page(hpage);
+ folio_lock(src);
}
/*
* Check for pages which are in the process of being freed. Without
- * page_mapping() set, hugetlbfs specific move page routine will not
+ * folio_mapping() set, hugetlbfs specific move page routine will not
* be called and we could leak usage counts for subpools.
*/
- if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
+ if (hugetlb_page_subpool(hpage) && !folio_mapping(src)) {
rc = -EBUSY;
goto out_unlock;
}
- if (PageAnon(hpage))
- anon_vma = page_get_anon_vma(hpage);
+ if (folio_test_anon(src))
+ anon_vma = folio_get_anon_vma(src);
- if (unlikely(!trylock_page(new_hpage)))
+ if (unlikely(!folio_trylock(dst)))
goto put_anon;
- if (page_mapped(hpage)) {
+ if (folio_mapped(src)) {
enum ttu_flags ttu = 0;
- if (!PageAnon(hpage)) {
+ if (!folio_test_anon(src)) {
/*
* In shared mappings, try_to_unmap could potentially
* call huge_pmd_unshare. Because of this, take
@@ -1313,7 +1327,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
i_mmap_unlock_write(mapping);
}
- if (!page_mapped(hpage))
+ if (!folio_mapped(src))
rc = move_to_new_folio(dst, src, mode);
if (page_was_mapped)
@@ -1321,7 +1335,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
unlock_put_anon:
- unlock_page(new_hpage);
+ folio_unlock(dst);
put_anon:
if (anon_vma)
@@ -1333,12 +1347,12 @@ put_anon:
}
out_unlock:
- unlock_page(hpage);
+ folio_unlock(src);
out:
if (rc == MIGRATEPAGE_SUCCESS)
putback_active_hugepage(hpage);
else if (rc != -EAGAIN)
- list_move_tail(&hpage->lru, ret);
+ list_move_tail(&src->lru, ret);
/*
* If migration was not successful and there's a freeing callback, use
@@ -1353,16 +1367,15 @@ out:
return rc;
}
-static inline int try_split_thp(struct page *page, struct page **page2,
- struct list_head *from)
+static inline int try_split_thp(struct page *page, struct list_head *split_pages)
{
- int rc = 0;
+ int rc;
lock_page(page);
- rc = split_huge_page_to_list(page, from);
+ rc = split_huge_page_to_list(page, split_pages);
unlock_page(page);
if (!rc)
- list_safe_reset_next(page, *page2, lru);
+ list_move_tail(&page->lru, split_pages);
return rc;
}
@@ -1400,6 +1413,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
int thp_retry = 1;
int nr_failed = 0;
int nr_failed_pages = 0;
+ int nr_retry_pages = 0;
int nr_succeeded = 0;
int nr_thp_succeeded = 0;
int nr_thp_failed = 0;
@@ -1420,9 +1434,9 @@ thp_subpage_migration:
for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
retry = 0;
thp_retry = 0;
+ nr_retry_pages = 0;
list_for_each_entry_safe(page, page2, from, lru) {
-retry:
/*
* THP statistics is based on the source huge page.
* Capture required information that might get lost
@@ -1447,6 +1461,7 @@ retry:
* page will be put back
* -EAGAIN: stay on the from list
* -ENOMEM: stay on the from list
+ * -ENOSYS: stay on the from list
* Other errno: put on ret_pages list then splice to
* from list
*/
@@ -1457,18 +1472,17 @@ retry:
* retry on the same page with the THP split
* to base pages.
*
- * Head page is retried immediately and tail
- * pages are added to the tail of the list so
- * we encounter them after the rest of the list
- * is processed.
+ * Sub-pages are put in thp_split_pages, and
+ * we will migrate them after the rest of the
+ * list is processed.
*/
case -ENOSYS:
/* THP migration is unsupported */
if (is_thp) {
nr_thp_failed++;
- if (!try_split_thp(page, &page2, &thp_split_pages)) {
+ if (!try_split_thp(page, &thp_split_pages)) {
nr_thp_split++;
- goto retry;
+ break;
}
/* Hugetlb migration is unsupported */
} else if (!no_subpage_counting) {
@@ -1476,24 +1490,25 @@ retry:
}
nr_failed_pages += nr_subpages;
+ list_move_tail(&page->lru, &ret_pages);
break;
case -ENOMEM:
/*
* When memory is low, don't bother to try to migrate
* other pages, just exit.
- * THP NUMA faulting doesn't split THP to retry.
*/
- if (is_thp && !nosplit) {
+ if (is_thp) {
nr_thp_failed++;
- if (!try_split_thp(page, &page2, &thp_split_pages)) {
+ /* THP NUMA faulting doesn't split THP to retry. */
+ if (!nosplit && !try_split_thp(page, &thp_split_pages)) {
nr_thp_split++;
- goto retry;
+ break;
}
} else if (!no_subpage_counting) {
nr_failed++;
}
- nr_failed_pages += nr_subpages;
+ nr_failed_pages += nr_subpages + nr_retry_pages;
/*
* There might be some subpages of fail-to-migrate THPs
* left in thp_split_pages list. Move them back to migration
@@ -1501,13 +1516,15 @@ retry:
* the caller otherwise the page refcnt will be leaked.
*/
list_splice_init(&thp_split_pages, from);
+ /* nr_failed isn't updated for not used */
nr_thp_failed += thp_retry;
goto out;
case -EAGAIN:
if (is_thp)
thp_retry++;
- else
+ else if (!no_subpage_counting)
retry++;
+ nr_retry_pages += nr_subpages;
break;
case MIGRATEPAGE_SUCCESS:
nr_succeeded += nr_subpages;
@@ -1533,6 +1550,7 @@ retry:
}
nr_failed += retry;
nr_thp_failed += thp_retry;
+ nr_failed_pages += nr_retry_pages;
/*
* Try to migrate subpages of fail-to-migrate THPs, no nr_failed
* counting in this round, since all subpages of a THP is counted
@@ -1672,9 +1690,12 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
goto out;
err = -ENOENT;
- if (!page || is_zone_device_page(page))
+ if (!page)
goto out;
+ if (is_zone_device_page(page))
+ goto out_putpage;
+
err = 0;
if (page_to_nid(page) == node)
goto out_putpage;
@@ -1735,7 +1756,7 @@ static int move_pages_and_store_status(struct mm_struct *mm, int node,
* well.
*/
if (err > 0)
- err += nr_pages - i - 1;
+ err += nr_pages - i;
return err;
}
return store_status(status, start, node, i - start);
@@ -1821,8 +1842,12 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
err = move_pages_and_store_status(mm, current_node, &pagelist,
status, start, i, nr_pages);
- if (err)
+ if (err) {
+ /* We have accounted for page i */
+ if (err > 0)
+ err--;
goto out;
+ }
current_node = NUMA_NO_NODE;
}
out_flush:
@@ -1848,6 +1873,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
for (i = 0; i < nr_pages; i++) {
unsigned long addr = (unsigned long)(*pages);
+ unsigned int foll_flags = FOLL_DUMP;
struct vm_area_struct *vma;
struct page *page;
int err = -EFAULT;
@@ -1856,19 +1882,26 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
if (!vma)
goto set_status;
+ /* Not all huge page follow APIs support 'FOLL_GET' */
+ if (!is_vm_hugetlb_page(vma))
+ foll_flags |= FOLL_GET;
+
/* FOLL_DUMP to ignore special (like zero) pages */
- page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
+ page = follow_page(vma, addr, foll_flags);
err = PTR_ERR(page);
if (IS_ERR(page))
goto set_status;
- if (page && !is_zone_device_page(page)) {
+ err = -ENOENT;
+ if (!page)
+ goto set_status;
+
+ if (!is_zone_device_page(page))
err = page_to_nid(page);
+
+ if (foll_flags & FOLL_GET)
put_page(page);
- } else {
- err = -ENOENT;
- }
set_status:
*status = err;
@@ -2170,456 +2203,4 @@ out:
return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
-
-/*
- * node_demotion[] example:
- *
- * Consider a system with two sockets. Each socket has
- * three classes of memory attached: fast, medium and slow.
- * Each memory class is placed in its own NUMA node. The
- * CPUs are placed in the node with the "fast" memory. The
- * 6 NUMA nodes (0-5) might be split among the sockets like
- * this:
- *
- * Socket A: 0, 1, 2
- * Socket B: 3, 4, 5
- *
- * When Node 0 fills up, its memory should be migrated to
- * Node 1. When Node 1 fills up, it should be migrated to
- * Node 2. The migration path start on the nodes with the
- * processors (since allocations default to this node) and
- * fast memory, progress through medium and end with the
- * slow memory:
- *
- * 0 -> 1 -> 2 -> stop
- * 3 -> 4 -> 5 -> stop
- *
- * This is represented in the node_demotion[] like this:
- *
- * { nr=1, nodes[0]=1 }, // Node 0 migrates to 1
- * { nr=1, nodes[0]=2 }, // Node 1 migrates to 2
- * { nr=0, nodes[0]=-1 }, // Node 2 does not migrate
- * { nr=1, nodes[0]=4 }, // Node 3 migrates to 4
- * { nr=1, nodes[0]=5 }, // Node 4 migrates to 5
- * { nr=0, nodes[0]=-1 }, // Node 5 does not migrate
- *
- * Moreover some systems may have multiple slow memory nodes.
- * Suppose a system has one socket with 3 memory nodes, node 0
- * is fast memory type, and node 1/2 both are slow memory
- * type, and the distance between fast memory node and slow
- * memory node is same. So the migration path should be:
- *
- * 0 -> 1/2 -> stop
- *
- * This is represented in the node_demotion[] like this:
- * { nr=2, {nodes[0]=1, nodes[1]=2} }, // Node 0 migrates to node 1 and node 2
- * { nr=0, nodes[0]=-1, }, // Node 1 dose not migrate
- * { nr=0, nodes[0]=-1, }, // Node 2 does not migrate
- */
-
-/*
- * Writes to this array occur without locking. Cycles are
- * not allowed: Node X demotes to Y which demotes to X...
- *
- * If multiple reads are performed, a single rcu_read_lock()
- * must be held over all reads to ensure that no cycles are
- * observed.
- */
-#define DEFAULT_DEMOTION_TARGET_NODES 15
-
-#if MAX_NUMNODES < DEFAULT_DEMOTION_TARGET_NODES
-#define DEMOTION_TARGET_NODES (MAX_NUMNODES - 1)
-#else
-#define DEMOTION_TARGET_NODES DEFAULT_DEMOTION_TARGET_NODES
-#endif
-
-struct demotion_nodes {
- unsigned short nr;
- short nodes[DEMOTION_TARGET_NODES];
-};
-
-static struct demotion_nodes *node_demotion __read_mostly;
-
-/**
- * next_demotion_node() - Get the next node in the demotion path
- * @node: The starting node to lookup the next node
- *
- * Return: node id for next memory node in the demotion path hierarchy
- * from @node; NUMA_NO_NODE if @node is terminal. This does not keep
- * @node online or guarantee that it *continues* to be the next demotion
- * target.
- */
-int next_demotion_node(int node)
-{
- struct demotion_nodes *nd;
- unsigned short target_nr, index;
- int target;
-
- if (!node_demotion)
- return NUMA_NO_NODE;
-
- nd = &node_demotion[node];
-
- /*
- * node_demotion[] is updated without excluding this
- * function from running. RCU doesn't provide any
- * compiler barriers, so the READ_ONCE() is required
- * to avoid compiler reordering or read merging.
- *
- * Make sure to use RCU over entire code blocks if
- * node_demotion[] reads need to be consistent.
- */
- rcu_read_lock();
- target_nr = READ_ONCE(nd->nr);
-
- switch (target_nr) {
- case 0:
- target = NUMA_NO_NODE;
- goto out;
- case 1:
- index = 0;
- break;
- default:
- /*
- * If there are multiple target nodes, just select one
- * target node randomly.
- *
- * In addition, we can also use round-robin to select
- * target node, but we should introduce another variable
- * for node_demotion[] to record last selected target node,
- * that may cause cache ping-pong due to the changing of
- * last target node. Or introducing per-cpu data to avoid
- * caching issue, which seems more complicated. So selecting
- * target node randomly seems better until now.
- */
- index = get_random_int() % target_nr;
- break;
- }
-
- target = READ_ONCE(nd->nodes[index]);
-
-out:
- rcu_read_unlock();
- return target;
-}
-
-/* Disable reclaim-based migration. */
-static void __disable_all_migrate_targets(void)
-{
- int node, i;
-
- if (!node_demotion)
- return;
-
- for_each_online_node(node) {
- node_demotion[node].nr = 0;
- for (i = 0; i < DEMOTION_TARGET_NODES; i++)
- node_demotion[node].nodes[i] = NUMA_NO_NODE;
- }
-}
-
-static void disable_all_migrate_targets(void)
-{
- __disable_all_migrate_targets();
-
- /*
- * Ensure that the "disable" is visible across the system.
- * Readers will see either a combination of before+disable
- * state or disable+after. They will never see before and
- * after state together.
- *
- * The before+after state together might have cycles and
- * could cause readers to do things like loop until this
- * function finishes. This ensures they can only see a
- * single "bad" read and would, for instance, only loop
- * once.
- */
- synchronize_rcu();
-}
-
-/*
- * Find an automatic demotion target for 'node'.
- * Failing here is OK. It might just indicate
- * being at the end of a chain.
- */
-static int establish_migrate_target(int node, nodemask_t *used,
- int best_distance)
-{
- int migration_target, index, val;
- struct demotion_nodes *nd;
-
- if (!node_demotion)
- return NUMA_NO_NODE;
-
- nd = &node_demotion[node];
-
- migration_target = find_next_best_node(node, used);
- if (migration_target == NUMA_NO_NODE)
- return NUMA_NO_NODE;
-
- /*
- * If the node already has a migration target, that target
- * is at the best distance from it. Still check whether this
- * node can be demoted to other target nodes that share the
- * same best distance.
- */
- if (best_distance != -1) {
- val = node_distance(node, migration_target);
- if (val > best_distance)
- goto out_clear;
- }
-
- index = nd->nr;
- if (WARN_ONCE(index >= DEMOTION_TARGET_NODES,
- "Exceeds maximum demotion target nodes\n"))
- goto out_clear;
-
- nd->nodes[index] = migration_target;
- nd->nr++;
-
- return migration_target;
-out_clear:
- node_clear(migration_target, *used);
- return NUMA_NO_NODE;
-}
-
-/*
- * When memory fills up on a node, memory contents can be
- * automatically migrated to another node instead of
- * discarded at reclaim.
- *
- * Establish a "migration path" which will start at nodes
- * with CPUs and will follow the priorities used to build the
- * page allocator zonelists.
- *
- * The difference here is that cycles must be avoided. If
- * node0 migrates to node1, then neither node1, nor anything
- * node1 migrates to, can migrate to node0. A node can also
- * be demoted to multiple target nodes if they all share the
- * same best distance to the source node.
- *
- * This function can run simultaneously with readers of
- * node_demotion[]. However, it can not run simultaneously
- * with itself. Exclusion is provided by memory hotplug events
- * being single-threaded.
- */
-static void __set_migration_target_nodes(void)
-{
- nodemask_t next_pass;
- nodemask_t this_pass;
- nodemask_t used_targets = NODE_MASK_NONE;
- int node, best_distance;
-
- /*
- * Avoid any oddities like cycles that could occur
- * from changes in the topology. This will leave
- * a momentary gap when migration is disabled.
- */
- disable_all_migrate_targets();
-
- /*
- * Allocations go close to CPUs, first. Assume that
- * the migration path starts at the nodes with CPUs.
- */
- next_pass = node_states[N_CPU];
-again:
- this_pass = next_pass;
- next_pass = NODE_MASK_NONE;
- /*
- * To avoid cycles in the migration "graph", ensure
- * that migration sources are not future targets by
- * setting them in 'used_targets'. Do this only
- * once per pass so that multiple source nodes can
- * share a target node.
- *
- * 'used_targets' will become unavailable in future
- * passes. This limits some opportunities for
- * multiple source nodes to share a destination.
- */
- nodes_or(used_targets, used_targets, this_pass);
-
- for_each_node_mask(node, this_pass) {
- best_distance = -1;
-
- /*
- * Try to set up the migration path for the node. There can
- * be multiple target nodes, so loop to find all the targets
- * that share the best node distance.
- */
- do {
- int target_node =
- establish_migrate_target(node, &used_targets,
- best_distance);
-
- if (target_node == NUMA_NO_NODE)
- break;
-
- if (best_distance == -1)
- best_distance = node_distance(node, target_node);
-
- /*
- * Visit targets from this pass in the next pass.
- * Eventually, every node will have been part of
- * a pass, and will become set in 'used_targets'.
- */
- node_set(target_node, next_pass);
- } while (1);
- }
- /*
- * 'next_pass' contains nodes which became migration
- * targets in this pass. Make additional passes until
- * no more migration targets are available.
- */
- if (!nodes_empty(next_pass))
- goto again;
-}
-
-/*
- * For callers that do not hold get_online_mems() already.
- */
-void set_migration_target_nodes(void)
-{
- get_online_mems();
- __set_migration_target_nodes();
- put_online_mems();
-}
-
-/*
- * This leaves migrate-on-reclaim transiently disabled between
- * the MEM_GOING_OFFLINE and MEM_OFFLINE events. This runs
- * whether reclaim-based migration is enabled or not, which
- * ensures that the user can turn reclaim-based migration on or
- * off at any time without needing to recalculate migration targets.
- *
- * These callbacks already hold get_online_mems(). That is why
- * __set_migration_target_nodes() can be used as opposed to
- * set_migration_target_nodes().
- */
-#ifdef CONFIG_MEMORY_HOTPLUG
-static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
- unsigned long action, void *_arg)
-{
- struct memory_notify *arg = _arg;
-
- /*
- * Only update the node migration order when a node is
- * changing status, like online->offline. This avoids
- * the overhead of synchronize_rcu() in most cases.
- */
- if (arg->status_change_nid < 0)
- return notifier_from_errno(0);
-
- switch (action) {
- case MEM_GOING_OFFLINE:
- /*
- * Make sure there are not transient states where
- * an offline node is a migration target. This
- * will leave migration disabled until the offline
- * completes and the MEM_OFFLINE case below runs.
- */
- disable_all_migrate_targets();
- break;
- case MEM_OFFLINE:
- case MEM_ONLINE:
- /*
- * Recalculate the target nodes once the node
- * reaches its final state (online or offline).
- */
- __set_migration_target_nodes();
- break;
- case MEM_CANCEL_OFFLINE:
- /*
- * MEM_GOING_OFFLINE disabled all the migration
- * targets. Reenable them.
- */
- __set_migration_target_nodes();
- break;
- case MEM_GOING_ONLINE:
- case MEM_CANCEL_ONLINE:
- break;
- }
-
- return notifier_from_errno(0);
-}
-#endif
-
-void __init migrate_on_reclaim_init(void)
-{
- node_demotion = kcalloc(nr_node_ids,
- sizeof(struct demotion_nodes),
- GFP_KERNEL);
- WARN_ON(!node_demotion);
-#ifdef CONFIG_MEMORY_HOTPLUG
- hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
-#endif
- /*
- * At this point, all NUMA nodes with memory/CPUs have their state
- * properly set, so we can build the demotion order now.
- * Hold the cpu_hotplug lock here, as we could possibly see
- * CPU hotplug events during boot.
- */
- cpus_read_lock();
- set_migration_target_nodes();
- cpus_read_unlock();
-}
-
-bool numa_demotion_enabled = false;
-
-#ifdef CONFIG_SYSFS
-static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return sysfs_emit(buf, "%s\n",
- numa_demotion_enabled ? "true" : "false");
-}
-
-static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- ssize_t ret;
-
- ret = kstrtobool(buf, &numa_demotion_enabled);
- if (ret)
- return ret;
-
- return count;
-}
-
-static struct kobj_attribute numa_demotion_enabled_attr =
- __ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
- numa_demotion_enabled_store);
-
-static struct attribute *numa_attrs[] = {
- &numa_demotion_enabled_attr.attr,
- NULL,
-};
-
-static const struct attribute_group numa_attr_group = {
- .attrs = numa_attrs,
-};
-
-static int __init numa_init_sysfs(void)
-{
- int err;
- struct kobject *numa_kobj;
-
- numa_kobj = kobject_create_and_add("numa", mm_kobj);
- if (!numa_kobj) {
- pr_err("failed to create numa kobject\n");
- return -ENOMEM;
- }
- err = sysfs_create_group(numa_kobj, &numa_attr_group);
- if (err) {
- pr_err("failed to register numa group\n");
- goto delete_obj;
- }
- return 0;
-
-delete_obj:
- kobject_put(numa_kobj);
- return err;
-}
-subsys_initcall(numa_init_sysfs);
-#endif /* CONFIG_SYSFS */
#endif /* CONFIG_NUMA */
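The removed next_demotion_node() above is a compact example of a lock-free reader: node_demotion[] can change concurrently, so the reads of nr and nodes[] are bracketed by rcu_read_lock()/rcu_read_unlock() and each load uses READ_ONCE(). A minimal sketch of that reader-side pattern, assuming the same struct demotion_nodes layout (the function name is hypothetical):

/* Hedged sketch of the removed reader-side pattern; not part of the patch. */
static int example_pick_demotion_target(struct demotion_nodes *nd)
{
	unsigned short nr;
	int target = NUMA_NO_NODE;

	/* Hold RCU over the whole block so nr and nodes[] are consistent. */
	rcu_read_lock();
	nr = READ_ONCE(nd->nr);
	if (nr)
		/* READ_ONCE() stops the compiler from re-reading or merging. */
		target = READ_ONCE(nd->nodes[get_random_int() % nr]);
	rcu_read_unlock();

	return target;
}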
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index dbf6c7a7a7c9..5ab6ab9d2ed8 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -186,9 +186,16 @@ again:
get_page(page);
/*
- * Optimize for the common case where page is only mapped once
- * in one process. If we can lock the page, then we can safely
- * set up a special migration page table entry now.
+ * We rely on trylock_page() to avoid deadlock between
+ * concurrent migrations where each is waiting on the other's
+ * page lock. If we can't immediately lock the page, we fail
+ * this migration as it is only best-effort anyway.
+ *
+ * If we can lock the page it's safe to set up a migration entry
+ * now. In the common case where the page is mapped once in a
+ * single process setting up the migration entry now is an
+ * optimisation to avoid walking the rmap later with
+ * try_to_migrate().
*/
if (trylock_page(page)) {
bool anon_exclusive;
@@ -226,6 +233,12 @@ again:
else
entry = make_readable_migration_entry(
page_to_pfn(page));
+ if (pte_present(pte)) {
+ if (pte_young(pte))
+ entry = make_migration_entry_young(entry);
+ if (pte_dirty(pte))
+ entry = make_migration_entry_dirty(entry);
+ }
swp_pte = swp_entry_to_pte(entry);
if (pte_present(pte)) {
if (pte_soft_dirty(pte))
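The hunk above preserves the PTE's accessed/dirty state by folding it into the migration entry with make_migration_entry_young()/make_migration_entry_dirty(). On the way back, the corresponding is_migration_entry_*() checks can restore those bits when the real PTE is rebuilt; a hedged sketch of that restore side (the helper name is illustrative):

/* Hedged sketch of the restore side, remove_migration_pte()-style. */
static pte_t example_restore_ad_bits(pte_t pte, swp_entry_t entry)
{
	if (is_migration_entry_young(entry))
		pte = pte_mkyoung(pte);	/* re-mark as recently accessed */
	if (is_migration_entry_dirty(entry))
		pte = pte_mkdirty(pte);	/* re-mark as dirty */
	return pte;
}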
diff --git a/mm/mlock.c b/mm/mlock.c
index b14e929084cc..7032f6dd0ce1 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -471,6 +471,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
unsigned long nstart, end, tmp;
struct vm_area_struct *vma, *prev;
int error;
+ MA_STATE(mas, &current->mm->mm_mt, start, start);
VM_BUG_ON(offset_in_page(start));
VM_BUG_ON(len != PAGE_ALIGN(len));
@@ -479,13 +480,14 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
return -EINVAL;
if (end == start)
return 0;
- vma = find_vma(current->mm, start);
- if (!vma || vma->vm_start > start)
+ vma = mas_walk(&mas);
+ if (!vma)
return -ENOMEM;
- prev = vma->vm_prev;
if (start > vma->vm_start)
prev = vma;
+ else
+ prev = mas_prev(&mas, 0);
for (nstart = start ; ; ) {
vm_flags_t newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
@@ -505,7 +507,7 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
if (nstart >= end)
break;
- vma = prev->vm_next;
+ vma = find_vma(prev->vm_mm, prev->vm_end);
if (!vma || vma->vm_start != nstart) {
error = -ENOMEM;
break;
@@ -526,24 +528,21 @@ static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
{
struct vm_area_struct *vma;
unsigned long count = 0;
+ unsigned long end;
+ VMA_ITERATOR(vmi, mm, start);
- if (mm == NULL)
- mm = current->mm;
+ /* Don't overflow past ULONG_MAX */
+ if (unlikely(ULONG_MAX - len < start))
+ end = ULONG_MAX;
+ else
+ end = start + len;
- vma = find_vma(mm, start);
- if (vma == NULL)
- return 0;
-
- for (; vma ; vma = vma->vm_next) {
- if (start >= vma->vm_end)
- continue;
- if (start + len <= vma->vm_start)
- break;
+ for_each_vma_range(vmi, vma, end) {
if (vma->vm_flags & VM_LOCKED) {
if (start > vma->vm_start)
count -= (start - vma->vm_start);
- if (start + len < vma->vm_end) {
- count += start + len - vma->vm_start;
+ if (end < vma->vm_end) {
+ count += end - vma->vm_start;
break;
}
count += vma->vm_end - vma->vm_start;
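count_mm_mlocked_page_nr() now walks the requested range with the maple-tree based VMA iterator instead of following vm_next. A hedged sketch of the general pattern, using an illustrative helper that counts the bytes carrying a given flag in [start, end):

/* Hedged sketch of the for_each_vma_range() walk; names are illustrative. */
static unsigned long example_count_flag_bytes(struct mm_struct *mm,
					      unsigned long start,
					      unsigned long end,
					      vm_flags_t flag)
{
	struct vm_area_struct *vma;
	unsigned long bytes = 0;
	VMA_ITERATOR(vmi, mm, start);		/* start the walk at @start */

	mmap_assert_locked(mm);			/* mmap_lock assumed held */
	for_each_vma_range(vmi, vma, end) {	/* VMAs intersecting [start, end) */
		if (!(vma->vm_flags & flag))
			continue;
		bytes += min(end, vma->vm_end) - max(start, vma->vm_start);
	}
	return bytes;
}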
@@ -659,6 +658,7 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
*/
static int apply_mlockall_flags(int flags)
{
+ MA_STATE(mas, &current->mm->mm_mt, 0, 0);
struct vm_area_struct *vma, *prev = NULL;
vm_flags_t to_add = 0;
@@ -679,7 +679,7 @@ static int apply_mlockall_flags(int flags)
to_add |= VM_LOCKONFAULT;
}
- for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
+ mas_for_each(&mas, vma, ULONG_MAX) {
vm_flags_t newflags;
newflags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
@@ -687,6 +687,7 @@ static int apply_mlockall_flags(int flags)
/* Ignore errors */
mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
+ mas_pause(&mas);
cond_resched();
}
out:
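apply_mlockall_flags() switches to mas_for_each(); because mlock_fixup() may split or merge VMAs and the loop reschedules, the maple state is invalidated with mas_pause() before the next iteration. A minimal sketch of that iterate-modify-resched pattern (the function and the per-VMA work are illustrative):

/* Hedged sketch of iterating while the tree may change underneath. */
static void example_walk_and_modify(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	MA_STATE(mas, &mm->mm_mt, 0, 0);

	mmap_assert_write_locked(mm);	/* assumed held by the caller */
	mas_for_each(&mas, vma, ULONG_MAX) {
		/* ...modify @vma; helpers may split or merge VMAs here... */
		mas_pause(&mas);	/* force a re-walk on the next step */
		cond_resched();
	}
}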
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 9ddaf0e1b0ab..0d7b2bd2454a 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -65,14 +65,16 @@ void __init mminit_verify_pageflags_layout(void)
shift = 8 * sizeof(unsigned long);
width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
- - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
+ - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
- "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
+ "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
SECTIONS_WIDTH,
NODES_WIDTH,
ZONES_WIDTH,
LAST_CPUPID_WIDTH,
KASAN_TAG_WIDTH,
+ LRU_GEN_WIDTH,
+ LRU_REFS_WIDTH,
NR_PAGEFLAGS);
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
diff --git a/mm/mm_slot.h b/mm/mm_slot.h
new file mode 100644
index 000000000000..83f18ed1c4bd
--- /dev/null
+++ b/mm/mm_slot.h
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _LINUX_MM_SLOT_H
+#define _LINUX_MM_SLOT_H
+
+#include <linux/hashtable.h>
+#include <linux/slab.h>
+
+/*
+ * struct mm_slot - hash lookup from mm to mm_slot
+ * @hash: link to the mm_slots hash list
+ * @mm_node: link into the mm_slots list
+ * @mm: the mm that this information is valid for
+ */
+struct mm_slot {
+ struct hlist_node hash;
+ struct list_head mm_node;
+ struct mm_struct *mm;
+};
+
+#define mm_slot_entry(ptr, type, member) \
+ container_of(ptr, type, member)
+
+static inline void *mm_slot_alloc(struct kmem_cache *cache)
+{
+ if (!cache) /* initialization failed */
+ return NULL;
+ return kmem_cache_zalloc(cache, GFP_KERNEL);
+}
+
+static inline void mm_slot_free(struct kmem_cache *cache, void *objp)
+{
+ kmem_cache_free(cache, objp);
+}
+
+#define mm_slot_lookup(_hashtable, _mm) \
+({ \
+ struct mm_slot *tmp_slot, *mm_slot = NULL; \
+ \
+ hash_for_each_possible(_hashtable, tmp_slot, hash, (unsigned long)_mm) \
+ if (_mm == tmp_slot->mm) { \
+ mm_slot = tmp_slot; \
+ break; \
+ } \
+ \
+ mm_slot; \
+})
+
+#define mm_slot_insert(_hashtable, _mm, _mm_slot) \
+({ \
+ _mm_slot->mm = _mm; \
+ hash_add(_hashtable, &_mm_slot->hash, (unsigned long)_mm); \
+})
+
+#endif /* _LINUX_MM_SLOT_H */
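mm_slot.h factors out the mm-to-private-state hash used by subsystems such as KSM and khugepaged. A hedged usage sketch: a subsystem embeds struct mm_slot as the first member of its own slot type so mm_slot_entry() (container_of) can recover it; all names below are illustrative.

/* Hedged sketch of a hypothetical mm_slot.h user; not part of the patch. */
struct example_mm_slot {
	struct mm_slot slot;		/* must be first for mm_slot_entry() */
	unsigned long nr_scanned;	/* subsystem-private state */
};

static struct kmem_cache *example_slot_cache;	/* assumed created at init */
static DEFINE_HASHTABLE(example_slots_hash, 10);

static int example_track_mm(struct mm_struct *mm)
{
	struct example_mm_slot *ex = mm_slot_alloc(example_slot_cache);
	struct mm_slot *slot;

	if (!ex)
		return -ENOMEM;
	slot = &ex->slot;
	mm_slot_insert(example_slots_hash, mm, slot);
	return 0;
}

static struct example_mm_slot *example_find_mm(struct mm_struct *mm)
{
	struct mm_slot *slot = mm_slot_lookup(example_slots_hash, mm);

	return slot ? mm_slot_entry(slot, struct example_mm_slot, slot) : NULL;
}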
diff --git a/mm/mmap.c b/mm/mmap.c
index 9d780f415be3..6e447544f07d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -14,7 +14,6 @@
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
-#include <linux/vmacache.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
@@ -39,7 +38,6 @@
#include <linux/audit.h>
#include <linux/khugepaged.h>
#include <linux/uprobes.h>
-#include <linux/rbtree_augmented.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/printk.h>
@@ -77,9 +75,10 @@ int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
static bool ignore_rlimit_data;
core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
-static void unmap_region(struct mm_struct *mm,
+static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
struct vm_area_struct *vma, struct vm_area_struct *prev,
- unsigned long start, unsigned long end);
+ struct vm_area_struct *next, unsigned long start,
+ unsigned long end);
static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
@@ -132,12 +131,10 @@ void unlink_file_vma(struct vm_area_struct *vma)
}
/*
- * Close a vm structure and free it, returning the next.
+ * Close a vm structure and free it.
*/
-static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
+static void remove_vma(struct vm_area_struct *vma)
{
- struct vm_area_struct *next = vma->vm_next;
-
might_sleep();
if (vma->vm_ops && vma->vm_ops->close)
vma->vm_ops->close(vma);
@@ -145,20 +142,41 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
fput(vma->vm_file);
mpol_put(vma_policy(vma));
vm_area_free(vma);
- return next;
}
-static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
- struct list_head *uf);
+/*
+ * check_brk_limits() - Use the platform-specific range check and verify
+ * the mlock limits.
+ * @addr: The address to check
+ * @len: The size of the increase.
+ *
+ * Return: 0 on success.
+ */
+static int check_brk_limits(unsigned long addr, unsigned long len)
+{
+ unsigned long mapped_addr;
+
+ mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+ if (IS_ERR_VALUE(mapped_addr))
+ return mapped_addr;
+
+ return mlock_future_check(current->mm, current->mm->def_flags, len);
+}
+static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
+ unsigned long newbrk, unsigned long oldbrk,
+ struct list_head *uf);
+static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *brkvma,
+ unsigned long addr, unsigned long request, unsigned long flags);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
unsigned long newbrk, oldbrk, origbrk;
struct mm_struct *mm = current->mm;
- struct vm_area_struct *next;
+ struct vm_area_struct *brkvma, *next = NULL;
unsigned long min_brk;
bool populate;
bool downgraded = false;
LIST_HEAD(uf);
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
if (mmap_write_lock_killable(mm))
return -EINTR;
@@ -200,35 +218,51 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
/*
* Always allow shrinking brk.
- * __do_munmap() may downgrade mmap_lock to read.
+ * do_brk_munmap() may downgrade mmap_lock to read.
*/
if (brk <= mm->brk) {
int ret;
+ /* Search one past newbrk */
+ mas_set(&mas, newbrk);
+ brkvma = mas_find(&mas, oldbrk);
+ BUG_ON(brkvma == NULL);
+ if (brkvma->vm_start >= oldbrk)
+ goto out; /* mapping intersects with an existing non-brk vma. */
/*
- * mm->brk must to be protected by write mmap_lock so update it
- * before downgrading mmap_lock. When __do_munmap() fails,
- * mm->brk will be restored from origbrk.
+ * mm->brk must be protected by write mmap_lock.
+ * do_brk_munmap() may downgrade the lock, so update it
+ * before calling do_brk_munmap().
*/
mm->brk = brk;
- ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
- if (ret < 0) {
- mm->brk = origbrk;
- goto out;
- } else if (ret == 1) {
+ ret = do_brk_munmap(&mas, brkvma, newbrk, oldbrk, &uf);
+ if (ret == 1) {
downgraded = true;
- }
- goto success;
+ goto success;
+ } else if (!ret)
+ goto success;
+
+ mm->brk = origbrk;
+ goto out;
}
- /* Check against existing mmap mappings. */
- next = find_vma(mm, oldbrk);
+ if (check_brk_limits(oldbrk, newbrk - oldbrk))
+ goto out;
+
+ /*
+ * Only check if the next VMA is within the stack_guard_gap of the
+ * expansion area
+ */
+ mas_set(&mas, oldbrk);
+ next = mas_find(&mas, newbrk - 1 + PAGE_SIZE + stack_guard_gap);
if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
goto out;
+ brkvma = mas_prev(&mas, mm->start_brk);
/* Ok, looks good - let it rip. */
- if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
+ if (do_brk_flags(&mas, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
goto out;
+
mm->brk = brk;
success:
@@ -247,104 +281,45 @@ out:
return origbrk;
}
-static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
-{
- unsigned long gap, prev_end;
-
- /*
- * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
- * allow two stack_guard_gaps between them here, and when choosing
- * an unmapped area; whereas when expanding we only require one.
- * That's a little inconsistent, but keeps the code here simpler.
- */
- gap = vm_start_gap(vma);
- if (vma->vm_prev) {
- prev_end = vm_end_gap(vma->vm_prev);
- if (gap > prev_end)
- gap -= prev_end;
- else
- gap = 0;
- }
- return gap;
-}
-
-#ifdef CONFIG_DEBUG_VM_RB
-static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
-{
- unsigned long max = vma_compute_gap(vma), subtree_gap;
- if (vma->vm_rb.rb_left) {
- subtree_gap = rb_entry(vma->vm_rb.rb_left,
- struct vm_area_struct, vm_rb)->rb_subtree_gap;
- if (subtree_gap > max)
- max = subtree_gap;
- }
- if (vma->vm_rb.rb_right) {
- subtree_gap = rb_entry(vma->vm_rb.rb_right,
- struct vm_area_struct, vm_rb)->rb_subtree_gap;
- if (subtree_gap > max)
- max = subtree_gap;
- }
- return max;
-}
-
-static int browse_rb(struct mm_struct *mm)
-{
- struct rb_root *root = &mm->mm_rb;
- int i = 0, j, bug = 0;
- struct rb_node *nd, *pn = NULL;
- unsigned long prev = 0, pend = 0;
-
- for (nd = rb_first(root); nd; nd = rb_next(nd)) {
- struct vm_area_struct *vma;
- vma = rb_entry(nd, struct vm_area_struct, vm_rb);
- if (vma->vm_start < prev) {
- pr_emerg("vm_start %lx < prev %lx\n",
- vma->vm_start, prev);
- bug = 1;
- }
- if (vma->vm_start < pend) {
- pr_emerg("vm_start %lx < pend %lx\n",
- vma->vm_start, pend);
- bug = 1;
- }
- if (vma->vm_start > vma->vm_end) {
- pr_emerg("vm_start %lx > vm_end %lx\n",
- vma->vm_start, vma->vm_end);
- bug = 1;
- }
- spin_lock(&mm->page_table_lock);
- if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
- pr_emerg("free gap %lx, correct %lx\n",
- vma->rb_subtree_gap,
- vma_compute_subtree_gap(vma));
- bug = 1;
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+extern void mt_validate(struct maple_tree *mt);
+extern void mt_dump(const struct maple_tree *mt);
+
+/* Validate the maple tree */
+static void validate_mm_mt(struct mm_struct *mm)
+{
+ struct maple_tree *mt = &mm->mm_mt;
+ struct vm_area_struct *vma_mt;
+
+ MA_STATE(mas, mt, 0, 0);
+
+ mt_validate(&mm->mm_mt);
+ mas_for_each(&mas, vma_mt, ULONG_MAX) {
+ if ((vma_mt->vm_start != mas.index) ||
+ (vma_mt->vm_end - 1 != mas.last)) {
+ pr_emerg("issue in %s\n", current->comm);
+ dump_stack();
+ dump_vma(vma_mt);
+ pr_emerg("mt piv: %p %lu - %lu\n", vma_mt,
+ mas.index, mas.last);
+ pr_emerg("mt vma: %p %lu - %lu\n", vma_mt,
+ vma_mt->vm_start, vma_mt->vm_end);
+
+ mt_dump(mas.tree);
+ if (vma_mt->vm_end != mas.last + 1) {
+ pr_err("vma: %p vma_mt %lu-%lu\tmt %lu-%lu\n",
+ mm, vma_mt->vm_start, vma_mt->vm_end,
+ mas.index, mas.last);
+ mt_dump(mas.tree);
+ }
+ VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm);
+ if (vma_mt->vm_start != mas.index) {
+ pr_err("vma: %p vma_mt %p %lu - %lu doesn't match\n",
+ mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end);
+ mt_dump(mas.tree);
+ }
+ VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm);
}
- spin_unlock(&mm->page_table_lock);
- i++;
- pn = nd;
- prev = vma->vm_start;
- pend = vma->vm_end;
- }
- j = 0;
- for (nd = pn; nd; nd = rb_prev(nd))
- j++;
- if (i != j) {
- pr_emerg("backwards %d, forwards %d\n", j, i);
- bug = 1;
- }
- return bug ? -1 : i;
-}
-
-static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
-{
- struct rb_node *nd;
-
- for (nd = rb_first(root); nd; nd = rb_next(nd)) {
- struct vm_area_struct *vma;
- vma = rb_entry(nd, struct vm_area_struct, vm_rb);
- VM_BUG_ON_VMA(vma != ignore &&
- vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
- vma);
}
}
@@ -352,10 +327,13 @@ static void validate_mm(struct mm_struct *mm)
{
int bug = 0;
int i = 0;
- unsigned long highest_address = 0;
- struct vm_area_struct *vma = mm->mmap;
+ struct vm_area_struct *vma;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
- while (vma) {
+ validate_mm_mt(mm);
+
+ mas_for_each(&mas, vma, ULONG_MAX) {
+#ifdef CONFIG_DEBUG_VM_RB
struct anon_vma *anon_vma = vma->anon_vma;
struct anon_vma_chain *avc;
@@ -365,93 +343,20 @@ static void validate_mm(struct mm_struct *mm)
anon_vma_interval_tree_verify(avc);
anon_vma_unlock_read(anon_vma);
}
-
- highest_address = vm_end_gap(vma);
- vma = vma->vm_next;
+#endif
i++;
}
if (i != mm->map_count) {
- pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
- bug = 1;
- }
- if (highest_address != mm->highest_vm_end) {
- pr_emerg("mm->highest_vm_end %lx, found %lx\n",
- mm->highest_vm_end, highest_address);
- bug = 1;
- }
- i = browse_rb(mm);
- if (i != mm->map_count) {
- if (i != -1)
- pr_emerg("map_count %d rb %d\n", mm->map_count, i);
+ pr_emerg("map_count %d mas_for_each %d\n", mm->map_count, i);
bug = 1;
}
VM_BUG_ON_MM(bug, mm);
}
-#else
-#define validate_mm_rb(root, ignore) do { } while (0)
-#define validate_mm(mm) do { } while (0)
-#endif
-
-RB_DECLARE_CALLBACKS_MAX(static, vma_gap_callbacks,
- struct vm_area_struct, vm_rb,
- unsigned long, rb_subtree_gap, vma_compute_gap)
-
-/*
- * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
- * vma->vm_prev->vm_end values changed, without modifying the vma's position
- * in the rbtree.
- */
-static void vma_gap_update(struct vm_area_struct *vma)
-{
- /*
- * As it turns out, RB_DECLARE_CALLBACKS_MAX() already created
- * a callback function that does exactly what we want.
- */
- vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
-}
-
-static inline void vma_rb_insert(struct vm_area_struct *vma,
- struct rb_root *root)
-{
- /* All rb_subtree_gap values must be consistent prior to insertion */
- validate_mm_rb(root, NULL);
-
- rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
-}
-static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
-{
- /*
- * Note rb_erase_augmented is a fairly large inline function,
- * so make sure we instantiate it only once with our desired
- * augmented rbtree callbacks.
- */
- rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
-}
-
-static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
- struct rb_root *root,
- struct vm_area_struct *ignore)
-{
- /*
- * All rb_subtree_gap values must be consistent prior to erase,
- * with the possible exception of
- *
- * a. the "next" vma being erased if next->vm_start was reduced in
- * __vma_adjust() -> __vma_unlink()
- * b. the vma being erased in detach_vmas_to_be_unmapped() ->
- * vma_rb_erase()
- */
- validate_mm_rb(root, ignore);
-
- __vma_rb_erase(vma, root);
-}
-
-static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
- struct rb_root *root)
-{
- vma_rb_erase_ignore(vma, root, vma);
-}
+#else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
+#define validate_mm_mt(root) do { } while (0)
+#define validate_mm(mm) do { } while (0)
+#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
/*
* vma has some anon_vma assigned, and is already inserted on that
@@ -485,208 +390,220 @@ anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
}
-static int find_vma_links(struct mm_struct *mm, unsigned long addr,
- unsigned long end, struct vm_area_struct **pprev,
- struct rb_node ***rb_link, struct rb_node **rb_parent)
+static unsigned long count_vma_pages_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long end)
{
- struct rb_node **__rb_link, *__rb_parent, *rb_prev;
+ VMA_ITERATOR(vmi, mm, addr);
+ struct vm_area_struct *vma;
+ unsigned long nr_pages = 0;
- mmap_assert_locked(mm);
- __rb_link = &mm->mm_rb.rb_node;
- rb_prev = __rb_parent = NULL;
+ for_each_vma_range(vmi, vma, end) {
+ unsigned long vm_start = max(addr, vma->vm_start);
+ unsigned long vm_end = min(end, vma->vm_end);
- while (*__rb_link) {
- struct vm_area_struct *vma_tmp;
+ nr_pages += PHYS_PFN(vm_end - vm_start);
+ }
- __rb_parent = *__rb_link;
- vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
+ return nr_pages;
+}
- if (vma_tmp->vm_end > addr) {
- /* Fail if an existing vma overlaps the area */
- if (vma_tmp->vm_start < end)
- return -ENOMEM;
- __rb_link = &__rb_parent->rb_left;
- } else {
- rb_prev = __rb_parent;
- __rb_link = &__rb_parent->rb_right;
- }
- }
+static void __vma_link_file(struct vm_area_struct *vma,
+ struct address_space *mapping)
+{
+ if (vma->vm_flags & VM_SHARED)
+ mapping_allow_writable(mapping);
- *pprev = NULL;
- if (rb_prev)
- *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
- *rb_link = __rb_link;
- *rb_parent = __rb_parent;
- return 0;
+ flush_dcache_mmap_lock(mapping);
+ vma_interval_tree_insert(vma, &mapping->i_mmap);
+ flush_dcache_mmap_unlock(mapping);
}
/*
- * vma_next() - Get the next VMA.
- * @mm: The mm_struct.
- * @vma: The current vma.
+ * vma_mas_store() - Store a VMA in the maple tree.
+ * @vma: The vm_area_struct
+ * @mas: The maple state
*
- * If @vma is NULL, return the first vma in the mm.
+ * Efficient way to store a VMA in the maple tree when the @mas has already
+ * walked to the correct location.
*
- * Returns: The next VMA after @vma.
+ * Note: the end address is inclusive in the maple tree.
*/
-static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
- struct vm_area_struct *vma)
+void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
{
- if (!vma)
- return mm->mmap;
-
- return vma->vm_next;
+ trace_vma_store(mas->tree, vma);
+ mas_set_range(mas, vma->vm_start, vma->vm_end - 1);
+ mas_store_prealloc(mas, vma);
}
/*
- * munmap_vma_range() - munmap VMAs that overlap a range.
- * @mm: The mm struct
- * @start: The start of the range.
- * @len: The length of the range.
- * @pprev: pointer to the pointer that will be set to previous vm_area_struct
- * @rb_link: the rb_node
- * @rb_parent: the parent rb_node
- *
- * Find all the vm_area_struct that overlap from @start to
- * @end and munmap them. Set @pprev to the previous vm_area_struct.
+ * vma_mas_remove() - Remove a VMA from the maple tree.
+ * @vma: The vm_area_struct
+ * @mas: The maple state
*
- * Returns: -ENOMEM on munmap failure or 0 on success.
+ * Efficient way to remove a VMA from the maple tree when the @mas has already
+ * been established and points to the correct location.
+ * Note: the end address is inclusive in the maple tree.
*/
-static inline int
-munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len,
- struct vm_area_struct **pprev, struct rb_node ***link,
- struct rb_node **parent, struct list_head *uf)
+void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
{
-
- while (find_vma_links(mm, start, start + len, pprev, link, parent))
- if (do_munmap(mm, start, len, uf))
- return -ENOMEM;
-
- return 0;
+ trace_vma_mas_szero(mas->tree, vma->vm_start, vma->vm_end - 1);
+ mas->index = vma->vm_start;
+ mas->last = vma->vm_end - 1;
+ mas_store_prealloc(mas, NULL);
}
-static unsigned long count_vma_pages_range(struct mm_struct *mm,
- unsigned long addr, unsigned long end)
+
+/*
+ * vma_mas_szero() - Set a given range to zero. Used when modifying a
+ * vm_area_struct start or end.
+ *
+ * @mas: The maple tree state
+ * @start: The start address to zero
+ * @end: The end address to zero.
+ */
+static inline void vma_mas_szero(struct ma_state *mas, unsigned long start,
+ unsigned long end)
{
- unsigned long nr_pages = 0;
- struct vm_area_struct *vma;
+ trace_vma_mas_szero(mas->tree, start, end - 1);
+ mas_set_range(mas, start, end - 1);
+ mas_store_prealloc(mas, NULL);
+}
- /* Find first overlapping mapping */
- vma = find_vma_intersection(mm, addr, end);
- if (!vma)
- return 0;
+static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
+ struct address_space *mapping = NULL;
- nr_pages = (min(end, vma->vm_end) -
- max(addr, vma->vm_start)) >> PAGE_SHIFT;
+ if (mas_preallocate(&mas, vma, GFP_KERNEL))
+ return -ENOMEM;
- /* Iterate over the rest of the overlaps */
- for (vma = vma->vm_next; vma; vma = vma->vm_next) {
- unsigned long overlap_len;
+ if (vma->vm_file) {
+ mapping = vma->vm_file->f_mapping;
+ i_mmap_lock_write(mapping);
+ }
- if (vma->vm_start > end)
- break;
+ vma_mas_store(vma, &mas);
- overlap_len = min(end, vma->vm_end) - vma->vm_start;
- nr_pages += overlap_len >> PAGE_SHIFT;
+ if (mapping) {
+ __vma_link_file(vma, mapping);
+ i_mmap_unlock_write(mapping);
}
- return nr_pages;
+ mm->map_count++;
+ validate_mm(mm);
+ return 0;
}
-void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
- struct rb_node **rb_link, struct rb_node *rb_parent)
+/*
+ * vma_expand - Expand an existing VMA
+ *
+ * @mas: The maple state
+ * @vma: The vma to expand
+ * @start: The start of the vma
+ * @end: The exclusive end of the vma
+ * @pgoff: The page offset of vma
+ * @next: The current or next vma.
+ *
+ * Expand @vma to @start and @end; the range may grow at either end. Will
+ * expand over @next if it's different from @vma and @end == @next->vm_end.
+ * Checking if the @vma can expand and merge with @next needs to be handled by
+ * the caller.
+ *
+ * Returns: 0 on success
+ */
+inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end, pgoff_t pgoff,
+ struct vm_area_struct *next)
{
- /* Update tracking information for the gap following the new vma. */
- if (vma->vm_next)
- vma_gap_update(vma->vm_next);
- else
- mm->highest_vm_end = vm_end_gap(vma);
+ struct mm_struct *mm = vma->vm_mm;
+ struct address_space *mapping = NULL;
+ struct rb_root_cached *root = NULL;
+ struct anon_vma *anon_vma = vma->anon_vma;
+ struct file *file = vma->vm_file;
+ bool remove_next = false;
- /*
- * vma->vm_prev wasn't known when we followed the rbtree to find the
- * correct insertion point for that vma. As a result, we could not
- * update the vma vm_rb parents rb_subtree_gap values on the way down.
- * So, we first insert the vma with a zero rb_subtree_gap value
- * (to be consistent with what we did on the way down), and then
- * immediately update the gap to the correct value. Finally we
- * rebalance the rbtree after all augmented values have been set.
- */
- rb_link_node(&vma->vm_rb, rb_parent, rb_link);
- vma->rb_subtree_gap = 0;
- vma_gap_update(vma);
- vma_rb_insert(vma, &mm->mm_rb);
-}
+ if (next && (vma != next) && (end == next->vm_end)) {
+ remove_next = true;
+ if (next->anon_vma && !vma->anon_vma) {
+ int error;
-static void __vma_link_file(struct vm_area_struct *vma)
-{
- struct file *file;
+ anon_vma = next->anon_vma;
+ vma->anon_vma = anon_vma;
+ error = anon_vma_clone(vma, next);
+ if (error)
+ return error;
+ }
+ }
+
+ /* Not merging but overwriting any part of next is not handled. */
+ VM_BUG_ON(next && !remove_next && next != vma && end > next->vm_start);
+ /* Only handles expanding */
+ VM_BUG_ON(vma->vm_start < start || vma->vm_end > end);
+
+ if (mas_preallocate(mas, vma, GFP_KERNEL))
+ goto nomem;
+
+ vma_adjust_trans_huge(vma, start, end, 0);
- file = vma->vm_file;
if (file) {
- struct address_space *mapping = file->f_mapping;
+ mapping = file->f_mapping;
+ root = &mapping->i_mmap;
+ uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+ i_mmap_lock_write(mapping);
+ }
- if (vma->vm_flags & VM_SHARED)
- mapping_allow_writable(mapping);
+ if (anon_vma) {
+ anon_vma_lock_write(anon_vma);
+ anon_vma_interval_tree_pre_update_vma(vma);
+ }
+ if (file) {
flush_dcache_mmap_lock(mapping);
- vma_interval_tree_insert(vma, &mapping->i_mmap);
- flush_dcache_mmap_unlock(mapping);
+ vma_interval_tree_remove(vma, root);
}
-}
-static void
-__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev, struct rb_node **rb_link,
- struct rb_node *rb_parent)
-{
- __vma_link_list(mm, vma, prev);
- __vma_link_rb(mm, vma, rb_link, rb_parent);
-}
+ vma->vm_start = start;
+ vma->vm_end = end;
+ vma->vm_pgoff = pgoff;
+ /* Note: mas must be pointing to the expanding VMA */
+ vma_mas_store(vma, mas);
-static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev, struct rb_node **rb_link,
- struct rb_node *rb_parent)
-{
- struct address_space *mapping = NULL;
+ if (file) {
+ vma_interval_tree_insert(vma, root);
+ flush_dcache_mmap_unlock(mapping);
+ }
- if (vma->vm_file) {
- mapping = vma->vm_file->f_mapping;
- i_mmap_lock_write(mapping);
+ /* Expanding over the next vma */
+ if (remove_next && file) {
+ __remove_shared_vm_struct(next, file, mapping);
}
- __vma_link(mm, vma, prev, rb_link, rb_parent);
- __vma_link_file(vma);
+ if (anon_vma) {
+ anon_vma_interval_tree_post_update_vma(vma);
+ anon_vma_unlock_write(anon_vma);
+ }
- if (mapping)
+ if (file) {
i_mmap_unlock_write(mapping);
+ uprobe_mmap(vma);
+ }
- mm->map_count++;
- validate_mm(mm);
-}
-
-/*
- * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
- * mm's list and rbtree. It has already been inserted into the interval tree.
- */
-static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
-{
- struct vm_area_struct *prev;
- struct rb_node **rb_link, *rb_parent;
+ if (remove_next) {
+ if (file) {
+ uprobe_munmap(next, next->vm_start, next->vm_end);
+ fput(file);
+ }
+ if (next->anon_vma)
+ anon_vma_merge(vma, next);
+ mm->map_count--;
+ mpol_put(vma_policy(next));
+ vm_area_free(next);
+ }
- if (find_vma_links(mm, vma->vm_start, vma->vm_end,
- &prev, &rb_link, &rb_parent))
- BUG();
- __vma_link(mm, vma, prev, rb_link, rb_parent);
- mm->map_count++;
-}
+ validate_mm(mm);
+ return 0;
-static __always_inline void __vma_unlink(struct mm_struct *mm,
- struct vm_area_struct *vma,
- struct vm_area_struct *ignore)
-{
- vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
- __vma_unlink_list(mm, vma);
- /* Kill the cache */
- vmacache_invalidate(mm);
+nomem:
+ return -ENOMEM;
}
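vma_expand() is the new in-place growth primitive used by the brk path; the caller must already have established that the expansion is a valid merge and that no unrelated VMA would be overwritten. A hedged caller sketch (the function name and the anonymous-only check are illustrative):

/* Hedged sketch of a vma_expand() caller; not part of the patch. */
static int example_grow_anon_vma(struct ma_state *mas,
				 struct vm_area_struct *vma,
				 unsigned long new_end)
{
	/* mmap_lock is assumed to be held for writing by the caller. */
	if (!vma || !vma_is_anonymous(vma))
		return -EINVAL;

	/* Grow only the end; start and pgoff are unchanged, no @next to eat. */
	return vma_expand(mas, vma, vma->vm_start, new_end, vma->vm_pgoff, NULL);
}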
/*
@@ -701,18 +618,19 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
struct vm_area_struct *expand)
{
struct mm_struct *mm = vma->vm_mm;
- struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
+ struct vm_area_struct *next_next, *next = find_vma(mm, vma->vm_end);
+ struct vm_area_struct *orig_vma = vma;
struct address_space *mapping = NULL;
struct rb_root_cached *root = NULL;
struct anon_vma *anon_vma = NULL;
struct file *file = vma->vm_file;
- bool start_changed = false, end_changed = false;
+ bool vma_changed = false;
long adjust_next = 0;
int remove_next = 0;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
+ struct vm_area_struct *exporter = NULL, *importer = NULL;
if (next && !insert) {
- struct vm_area_struct *exporter = NULL, *importer = NULL;
-
if (end >= next->vm_end) {
/*
* vma expands, overlapping all the next, and
@@ -741,10 +659,11 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
* remove_next == 1 is case 1 or 7.
*/
remove_next = 1 + (end > next->vm_end);
+ if (remove_next == 2)
+ next_next = find_vma(mm, next->vm_end);
+
VM_WARN_ON(remove_next == 2 &&
- end != next->vm_next->vm_end);
- /* trim end to next, for case 6 first pass */
- end = next->vm_end;
+ end != next_next->vm_end);
}
exporter = next;
@@ -755,7 +674,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
* next, if the vma overlaps with it.
*/
if (remove_next == 2 && !next->anon_vma)
- exporter = next->vm_next;
+ exporter = next_next;
} else if (end > next->vm_start) {
/*
@@ -792,9 +711,11 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
return error;
}
}
-again:
- vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
+ if (mas_preallocate(&mas, vma, GFP_KERNEL))
+ return -ENOMEM;
+
+ vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
if (file) {
mapping = file->f_mapping;
root = &mapping->i_mmap;
@@ -804,14 +725,14 @@ again:
uprobe_munmap(next, next->vm_start, next->vm_end);
i_mmap_lock_write(mapping);
- if (insert) {
+ if (insert && insert->vm_file) {
/*
* Put into interval tree now, so instantiated pages
* are visible to arm/parisc __flush_dcache_page
* throughout; but we cannot insert into address
* space until vma start or end is updated.
*/
- __vma_link_file(insert);
+ __vma_link_file(insert, insert->vm_file->f_mapping);
}
}
@@ -835,17 +756,37 @@ again:
}
if (start != vma->vm_start) {
+ if ((vma->vm_start < start) &&
+ (!insert || (insert->vm_end != start))) {
+ vma_mas_szero(&mas, vma->vm_start, start);
+ VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
+ } else {
+ vma_changed = true;
+ }
vma->vm_start = start;
- start_changed = true;
}
if (end != vma->vm_end) {
+ if (vma->vm_end > end) {
+ if (!insert || (insert->vm_start != end)) {
+ vma_mas_szero(&mas, end, vma->vm_end);
+ mas_reset(&mas);
+ VM_WARN_ON(insert &&
+ insert->vm_end < vma->vm_end);
+ }
+ } else {
+ vma_changed = true;
+ }
vma->vm_end = end;
- end_changed = true;
}
+
+ if (vma_changed)
+ vma_mas_store(vma, &mas);
+
vma->vm_pgoff = pgoff;
if (adjust_next) {
next->vm_start += adjust_next;
next->vm_pgoff += adjust_next >> PAGE_SHIFT;
+ vma_mas_store(next, &mas);
}
if (file) {
@@ -855,42 +796,19 @@ again:
flush_dcache_mmap_unlock(mapping);
}
- if (remove_next) {
- /*
- * vma_merge has merged next into vma, and needs
- * us to remove next before dropping the locks.
- */
- if (remove_next != 3)
- __vma_unlink(mm, next, next);
- else
- /*
- * vma is not before next if they've been
- * swapped.
- *
- * pre-swap() next->vm_start was reduced so
- * tell validate_mm_rb to ignore pre-swap()
- * "next" (which is stored in post-swap()
- * "vma").
- */
- __vma_unlink(mm, next, vma);
- if (file)
- __remove_shared_vm_struct(next, file, mapping);
+ if (remove_next && file) {
+ __remove_shared_vm_struct(next, file, mapping);
+ if (remove_next == 2)
+ __remove_shared_vm_struct(next_next, file, mapping);
} else if (insert) {
/*
* split_vma has split insert from vma, and needs
* us to insert it before dropping the locks
* (it may either follow vma or precede it).
*/
- __insert_vm_struct(mm, insert);
- } else {
- if (start_changed)
- vma_gap_update(vma);
- if (end_changed) {
- if (!next)
- mm->highest_vm_end = vm_end_gap(vma);
- else if (!adjust_next)
- vma_gap_update(next);
- }
+ mas_reset(&mas);
+ vma_mas_store(insert, &mas);
+ mm->map_count++;
}
if (anon_vma) {
@@ -909,6 +827,7 @@ again:
}
if (remove_next) {
+again:
if (file) {
uprobe_munmap(next, next->vm_start, next->vm_end);
fput(file);
@@ -917,66 +836,24 @@ again:
anon_vma_merge(vma, next);
mm->map_count--;
mpol_put(vma_policy(next));
+ if (remove_next != 2)
+ BUG_ON(vma->vm_end < next->vm_end);
vm_area_free(next);
+
/*
* In mprotect's case 6 (see comments on vma_merge),
- * we must remove another next too. It would clutter
- * up the code too much to do both in one go.
+ * we must remove next_next too.
*/
- if (remove_next != 3) {
- /*
- * If "next" was removed and vma->vm_end was
- * expanded (up) over it, in turn
- * "next->vm_prev->vm_end" changed and the
- * "vma->vm_next" gap must be updated.
- */
- next = vma->vm_next;
- } else {
- /*
- * For the scope of the comment "next" and
- * "vma" considered pre-swap(): if "vma" was
- * removed, next->vm_start was expanded (down)
- * over it and the "next" gap must be updated.
- * Because of the swap() the post-swap() "vma"
- * actually points to pre-swap() "next"
- * (post-swap() "next" as opposed is now a
- * dangling pointer).
- */
- next = vma;
- }
if (remove_next == 2) {
remove_next = 1;
- end = next->vm_end;
+ next = next_next;
goto again;
}
- else if (next)
- vma_gap_update(next);
- else {
- /*
- * If remove_next == 2 we obviously can't
- * reach this path.
- *
- * If remove_next == 3 we can't reach this
- * path because pre-swap() next is always not
- * NULL. pre-swap() "next" is not being
- * removed and its next->vm_end is not altered
- * (and furthermore "end" already matches
- * next->vm_end in remove_next == 3).
- *
- * We reach this only in the remove_next == 1
- * case if the "next" vma that was removed was
- * the highest vma of the mm. However in such
- * case next->vm_end == "end" and the extended
- * "vma" has vma->vm_end == next->vm_end so
- * mm->highest_vm_end doesn't need any update
- * in remove_next == 1 case.
- */
- VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
- }
}
if (insert && file)
uprobe_mmap(insert);
+ mas_destroy(&mas);
validate_mm(mm);
return 0;
@@ -1128,8 +1005,10 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
struct anon_vma_name *anon_name)
{
pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
- struct vm_area_struct *area, *next;
- int err;
+ struct vm_area_struct *mid, *next, *res;
+ int err = -1;
+ bool merge_prev = false;
+ bool merge_next = false;
/*
* We later require that vma->vm_flags == vm_flags,
@@ -1138,76 +1017,61 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
if (vm_flags & VM_SPECIAL)
return NULL;
- next = vma_next(mm, prev);
- area = next;
- if (area && area->vm_end == end) /* cases 6, 7, 8 */
- next = next->vm_next;
+ next = find_vma(mm, prev ? prev->vm_end : 0);
+ mid = next;
+ if (next && next->vm_end == end) /* cases 6, 7, 8 */
+ next = find_vma(mm, next->vm_end);
/* verify some invariant that must be enforced by the caller */
VM_WARN_ON(prev && addr <= prev->vm_start);
- VM_WARN_ON(area && end > area->vm_end);
+ VM_WARN_ON(mid && end > mid->vm_end);
VM_WARN_ON(addr >= end);
- /*
- * Can it merge with the predecessor?
- */
+ /* Can we merge the predecessor? */
if (prev && prev->vm_end == addr &&
mpol_equal(vma_policy(prev), policy) &&
can_vma_merge_after(prev, vm_flags,
anon_vma, file, pgoff,
vm_userfaultfd_ctx, anon_name)) {
- /*
- * OK, it can. Can we now merge in the successor as well?
- */
- if (next && end == next->vm_start &&
- mpol_equal(policy, vma_policy(next)) &&
- can_vma_merge_before(next, vm_flags,
- anon_vma, file,
- pgoff+pglen,
- vm_userfaultfd_ctx, anon_name) &&
- is_mergeable_anon_vma(prev->anon_vma,
- next->anon_vma, NULL)) {
- /* cases 1, 6 */
- err = __vma_adjust(prev, prev->vm_start,
- next->vm_end, prev->vm_pgoff, NULL,
- prev);
- } else /* cases 2, 5, 7 */
- err = __vma_adjust(prev, prev->vm_start,
- end, prev->vm_pgoff, NULL, prev);
- if (err)
- return NULL;
- khugepaged_enter_vma(prev, vm_flags);
- return prev;
+ merge_prev = true;
}
-
- /*
- * Can this new request be merged in front of next?
- */
+ /* Can we merge the successor? */
if (next && end == next->vm_start &&
mpol_equal(policy, vma_policy(next)) &&
can_vma_merge_before(next, vm_flags,
anon_vma, file, pgoff+pglen,
vm_userfaultfd_ctx, anon_name)) {
+ merge_next = true;
+ }
+ /* Can we merge both the predecessor and the successor? */
+ if (merge_prev && merge_next &&
+ is_mergeable_anon_vma(prev->anon_vma,
+ next->anon_vma, NULL)) { /* cases 1, 6 */
+ err = __vma_adjust(prev, prev->vm_start,
+ next->vm_end, prev->vm_pgoff, NULL,
+ prev);
+ res = prev;
+ } else if (merge_prev) { /* cases 2, 5, 7 */
+ err = __vma_adjust(prev, prev->vm_start,
+ end, prev->vm_pgoff, NULL, prev);
+ res = prev;
+ } else if (merge_next) {
if (prev && addr < prev->vm_end) /* case 4 */
err = __vma_adjust(prev, prev->vm_start,
- addr, prev->vm_pgoff, NULL, next);
- else { /* cases 3, 8 */
- err = __vma_adjust(area, addr, next->vm_end,
- next->vm_pgoff - pglen, NULL, next);
- /*
- * In case 3 area is already equal to next and
- * this is a noop, but in case 8 "area" has
- * been removed and next was expanded over it.
- */
- area = next;
- }
- if (err)
- return NULL;
- khugepaged_enter_vma(area, vm_flags);
- return area;
+ addr, prev->vm_pgoff, NULL, next);
+ else /* cases 3, 8 */
+ err = __vma_adjust(mid, addr, next->vm_end,
+ next->vm_pgoff - pglen, NULL, next);
+ res = next;
}
- return NULL;
+ /*
+ * Could not merge with the predecessor or successor, or __vma_adjust() failed.
+ */
+ if (err)
+ return NULL;
+ khugepaged_enter_vma(res, vm_flags);
+ return res;
}
/*
@@ -1275,18 +1139,24 @@ static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_
*/
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
+ MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end);
struct anon_vma *anon_vma = NULL;
+ struct vm_area_struct *prev, *next;
/* Try next first. */
- if (vma->vm_next) {
- anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next);
+ next = mas_walk(&mas);
+ if (next) {
+ anon_vma = reusable_anon_vma(next, vma, next);
if (anon_vma)
return anon_vma;
}
+ prev = mas_prev(&mas, 0);
+ VM_BUG_ON_VMA(prev != vma, vma);
+ prev = mas_prev(&mas, 0);
/* Try prev next. */
- if (vma->vm_prev)
- anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma);
+ if (prev)
+ anon_vma = reusable_anon_vma(prev, prev, vma);
/*
* We might reach here with anon_vma == NULL if we can't find
@@ -1375,6 +1245,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
vm_flags_t vm_flags;
int pkey = 0;
+ validate_mm(mm);
*populate = 0;
if (!len)
@@ -1678,388 +1549,63 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}
-unsigned long mmap_region(struct file *file, unsigned long addr,
- unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
- struct list_head *uf)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma, *prev, *merge;
- int error;
- struct rb_node **rb_link, *rb_parent;
- unsigned long charged = 0;
-
- /* Check against address space limit. */
- if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
- unsigned long nr_pages;
-
- /*
- * MAP_FIXED may remove pages of mappings that intersects with
- * requested mapping. Account for the pages it would unmap.
- */
- nr_pages = count_vma_pages_range(mm, addr, addr + len);
-
- if (!may_expand_vm(mm, vm_flags,
- (len >> PAGE_SHIFT) - nr_pages))
- return -ENOMEM;
- }
-
- /* Clear old maps, set up prev, rb_link, rb_parent, and uf */
- if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
- return -ENOMEM;
- /*
- * Private writable mapping: check memory availability
- */
- if (accountable_mapping(file, vm_flags)) {
- charged = len >> PAGE_SHIFT;
- if (security_vm_enough_memory_mm(mm, charged))
- return -ENOMEM;
- vm_flags |= VM_ACCOUNT;
- }
-
- /*
- * Can we just expand an old mapping?
- */
- vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
- NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
- if (vma)
- goto out;
-
- /*
- * Determine the object being mapped and call the appropriate
- * specific mapper. the address has already been validated, but
- * not unmapped, but the maps are removed from the list.
- */
- vma = vm_area_alloc(mm);
- if (!vma) {
- error = -ENOMEM;
- goto unacct_error;
- }
-
- vma->vm_start = addr;
- vma->vm_end = addr + len;
- vma->vm_flags = vm_flags;
- vma->vm_page_prot = vm_get_page_prot(vm_flags);
- vma->vm_pgoff = pgoff;
-
- if (file) {
- if (vm_flags & VM_SHARED) {
- error = mapping_map_writable(file->f_mapping);
- if (error)
- goto free_vma;
- }
-
- vma->vm_file = get_file(file);
- error = call_mmap(file, vma);
- if (error)
- goto unmap_and_free_vma;
-
- /* Can addr have changed??
- *
- * Answer: Yes, several device drivers can do it in their
- * f_op->mmap method. -DaveM
- * Bug: If addr is changed, prev, rb_link, rb_parent should
- * be updated for vma_link()
- */
- WARN_ON_ONCE(addr != vma->vm_start);
-
- addr = vma->vm_start;
-
- /* If vm_flags changed after call_mmap(), we should try merge vma again
- * as we may succeed this time.
- */
- if (unlikely(vm_flags != vma->vm_flags && prev)) {
- merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
- NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
- if (merge) {
- /* ->mmap() can change vma->vm_file and fput the original file. So
- * fput the vma->vm_file here or we would add an extra fput for file
- * and cause general protection fault ultimately.
- */
- fput(vma->vm_file);
- vm_area_free(vma);
- vma = merge;
- /* Update vm_flags to pick up the change. */
- vm_flags = vma->vm_flags;
- goto unmap_writable;
- }
- }
-
- vm_flags = vma->vm_flags;
- } else if (vm_flags & VM_SHARED) {
- error = shmem_zero_setup(vma);
- if (error)
- goto free_vma;
- } else {
- vma_set_anonymous(vma);
- }
-
- /* Allow architectures to sanity-check the vm_flags */
- if (!arch_validate_flags(vma->vm_flags)) {
- error = -EINVAL;
- if (file)
- goto unmap_and_free_vma;
- else
- goto free_vma;
- }
-
- vma_link(mm, vma, prev, rb_link, rb_parent);
-
- /*
- * vma_merge() calls khugepaged_enter_vma() either, the below
- * call covers the non-merge case.
- */
- khugepaged_enter_vma(vma, vma->vm_flags);
-
- /* Once vma denies write, undo our temporary denial count */
-unmap_writable:
- if (file && vm_flags & VM_SHARED)
- mapping_unmap_writable(file->f_mapping);
- file = vma->vm_file;
-out:
- perf_event_mmap(vma);
-
- vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
- if (vm_flags & VM_LOCKED) {
- if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
- is_vm_hugetlb_page(vma) ||
- vma == get_gate_vma(current->mm))
- vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
- else
- mm->locked_vm += (len >> PAGE_SHIFT);
- }
-
- if (file)
- uprobe_mmap(vma);
-
- /*
- * New (or expanded) vma always get soft dirty status.
- * Otherwise user-space soft-dirty page tracker won't
- * be able to distinguish situation when vma area unmapped,
- * then new mapped in-place (which must be aimed as
- * a completely new data area).
- */
- vma->vm_flags |= VM_SOFTDIRTY;
-
- vma_set_page_prot(vma);
-
- return addr;
-
-unmap_and_free_vma:
- fput(vma->vm_file);
- vma->vm_file = NULL;
-
- /* Undo any partial mapping done by a device driver. */
- unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
- if (vm_flags & VM_SHARED)
- mapping_unmap_writable(file->f_mapping);
-free_vma:
- vm_area_free(vma);
-unacct_error:
- if (charged)
- vm_unacct_memory(charged);
- return error;
-}
-
+/**
+ * unmapped_area() - Find an area between the low_limit and the high_limit with
+ * the correct alignment and offset, all from @info. Note: current->mm is used
+ * for the search.
+ *
+ * @info: The unmapped area information including the range (low_limit -
+ * high_limit), the alignment offset and mask.
+ *
+ * Return: A memory address or -ENOMEM.
+ */
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
- /*
- * We implement the search by looking for an rbtree node that
- * immediately follows a suitable gap. That is,
- * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
- * - gap_end = vma->vm_start >= info->low_limit + length;
- * - gap_end - gap_start >= length
- */
+ unsigned long length, gap;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long length, low_limit, high_limit, gap_start, gap_end;
+ MA_STATE(mas, &current->mm->mm_mt, 0, 0);
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
return -ENOMEM;
- /* Adjust search limits by the desired length */
- if (info->high_limit < length)
- return -ENOMEM;
- high_limit = info->high_limit - length;
-
- if (info->low_limit > high_limit)
- return -ENOMEM;
- low_limit = info->low_limit + length;
-
- /* Check if rbtree root looks promising */
- if (RB_EMPTY_ROOT(&mm->mm_rb))
- goto check_highest;
- vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
- if (vma->rb_subtree_gap < length)
- goto check_highest;
-
- while (true) {
- /* Visit left subtree if it looks promising */
- gap_end = vm_start_gap(vma);
- if (gap_end >= low_limit && vma->vm_rb.rb_left) {
- struct vm_area_struct *left =
- rb_entry(vma->vm_rb.rb_left,
- struct vm_area_struct, vm_rb);
- if (left->rb_subtree_gap >= length) {
- vma = left;
- continue;
- }
- }
-
- gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
-check_current:
- /* Check if current node has a suitable gap */
- if (gap_start > high_limit)
- return -ENOMEM;
- if (gap_end >= low_limit &&
- gap_end > gap_start && gap_end - gap_start >= length)
- goto found;
-
- /* Visit right subtree if it looks promising */
- if (vma->vm_rb.rb_right) {
- struct vm_area_struct *right =
- rb_entry(vma->vm_rb.rb_right,
- struct vm_area_struct, vm_rb);
- if (right->rb_subtree_gap >= length) {
- vma = right;
- continue;
- }
- }
-
- /* Go back up the rbtree to find next candidate node */
- while (true) {
- struct rb_node *prev = &vma->vm_rb;
- if (!rb_parent(prev))
- goto check_highest;
- vma = rb_entry(rb_parent(prev),
- struct vm_area_struct, vm_rb);
- if (prev == vma->vm_rb.rb_left) {
- gap_start = vm_end_gap(vma->vm_prev);
- gap_end = vm_start_gap(vma);
- goto check_current;
- }
- }
- }
-
-check_highest:
- /* Check highest gap, which does not precede any rbtree node */
- gap_start = mm->highest_vm_end;
- gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */
- if (gap_start > high_limit)
+ if (mas_empty_area(&mas, info->low_limit, info->high_limit - 1,
+ length))
return -ENOMEM;
-found:
- /* We found a suitable gap. Clip it with the original low_limit. */
- if (gap_start < info->low_limit)
- gap_start = info->low_limit;
-
- /* Adjust gap address to the desired alignment */
- gap_start += (info->align_offset - gap_start) & info->align_mask;
-
- VM_BUG_ON(gap_start + info->length > info->high_limit);
- VM_BUG_ON(gap_start + info->length > gap_end);
- return gap_start;
+ gap = mas.index;
+ gap += (info->align_offset - gap) & info->align_mask;
+ return gap;
}
+/**
+ * unmapped_area_topdown() - Find an area between the low_limit and the
+ * high_limit with the correct alignment and offset at the highest available
+ * address, all from @info. Note: current->mm is used for the search.
+ *
+ * @info: The unmapped area information including the range (low_limit -
+ * high_limit), the alignment offset and mask.
+ *
+ * Return: A memory address or -ENOMEM.
+ */
static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long length, low_limit, high_limit, gap_start, gap_end;
+ unsigned long length, gap;
+ MA_STATE(mas, &current->mm->mm_mt, 0, 0);
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
return -ENOMEM;
- /*
- * Adjust search limits by the desired length.
- * See implementation comment at top of unmapped_area().
- */
- gap_end = info->high_limit;
- if (gap_end < length)
- return -ENOMEM;
- high_limit = gap_end - length;
-
- if (info->low_limit > high_limit)
+ if (mas_empty_area_rev(&mas, info->low_limit, info->high_limit - 1,
+ length))
return -ENOMEM;
- low_limit = info->low_limit + length;
-
- /* Check highest gap, which does not precede any rbtree node */
- gap_start = mm->highest_vm_end;
- if (gap_start <= high_limit)
- goto found_highest;
-
- /* Check if rbtree root looks promising */
- if (RB_EMPTY_ROOT(&mm->mm_rb))
- return -ENOMEM;
- vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
- if (vma->rb_subtree_gap < length)
- return -ENOMEM;
-
- while (true) {
- /* Visit right subtree if it looks promising */
- gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
- if (gap_start <= high_limit && vma->vm_rb.rb_right) {
- struct vm_area_struct *right =
- rb_entry(vma->vm_rb.rb_right,
- struct vm_area_struct, vm_rb);
- if (right->rb_subtree_gap >= length) {
- vma = right;
- continue;
- }
- }
-
-check_current:
- /* Check if current node has a suitable gap */
- gap_end = vm_start_gap(vma);
- if (gap_end < low_limit)
- return -ENOMEM;
- if (gap_start <= high_limit &&
- gap_end > gap_start && gap_end - gap_start >= length)
- goto found;
-
- /* Visit left subtree if it looks promising */
- if (vma->vm_rb.rb_left) {
- struct vm_area_struct *left =
- rb_entry(vma->vm_rb.rb_left,
- struct vm_area_struct, vm_rb);
- if (left->rb_subtree_gap >= length) {
- vma = left;
- continue;
- }
- }
-
- /* Go back up the rbtree to find next candidate node */
- while (true) {
- struct rb_node *prev = &vma->vm_rb;
- if (!rb_parent(prev))
- return -ENOMEM;
- vma = rb_entry(rb_parent(prev),
- struct vm_area_struct, vm_rb);
- if (prev == vma->vm_rb.rb_right) {
- gap_start = vma->vm_prev ?
- vm_end_gap(vma->vm_prev) : 0;
- goto check_current;
- }
- }
- }
-
-found:
- /* We found a suitable gap. Clip it with the original high_limit. */
- if (gap_end > info->high_limit)
- gap_end = info->high_limit;
-found_highest:
- /* Compute highest gap address at the desired alignment */
- gap_end -= info->length;
- gap_end -= (gap_end - info->align_offset) & info->align_mask;
-
- VM_BUG_ON(gap_end < info->low_limit);
- VM_BUG_ON(gap_end < gap_start);
- return gap_end;
+ gap = mas.last + 1 - info->length;
+ gap -= (gap - info->align_offset) & info->align_mask;
+ return gap;
}
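For orientation only, a minimal sketch (not part of this patch; the function name and limit values are assumptions) of how an architecture's get_unmapped_area() implementation typically reaches these helpers through vm_unmapped_area(), selecting the top-down variant via the flags field:

/*
 * Illustrative sketch only: drive the gap search via vm_unmapped_area().
 * Clearing info.flags would select the bottom-up unmapped_area() instead.
 */
static unsigned long example_get_unmapped_area_topdown(struct file *filp,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;			/* assumed lower bound */
	info.high_limit = current->mm->mmap_base;	/* assumed upper bound */
	info.align_mask = 0;				/* no extra alignment */
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);			/* address or -ENOMEM */
}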
/*
@@ -2232,6 +1778,9 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
*/
pgoff = 0;
get_area = shmem_get_unmapped_area;
+ } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+ /* Ensures that larger anonymous mappings are THP aligned. */
+ get_area = thp_get_unmapped_area;
}
addr = get_area(file, addr, len, pgoff, flags);
@@ -2249,58 +1798,67 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
EXPORT_SYMBOL(get_unmapped_area);
-/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
-struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+/**
+ * find_vma_intersection() - Look up the first VMA which intersects the interval
+ * @mm: The process address space.
+ * @start_addr: The inclusive start user address.
+ * @end_addr: The exclusive end user address.
+ *
+ * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
+ * start_addr < end_addr.
+ */
+struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
+ unsigned long start_addr,
+ unsigned long end_addr)
{
- struct rb_node *rb_node;
- struct vm_area_struct *vma;
+ unsigned long index = start_addr;
mmap_assert_locked(mm);
- /* Check the cache first. */
- vma = vmacache_find(mm, addr);
- if (likely(vma))
- return vma;
-
- rb_node = mm->mm_rb.rb_node;
-
- while (rb_node) {
- struct vm_area_struct *tmp;
-
- tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+ return mt_find(&mm->mm_mt, &index, end_addr - 1);
+}
+EXPORT_SYMBOL(find_vma_intersection);
- if (tmp->vm_end > addr) {
- vma = tmp;
- if (tmp->vm_start <= addr)
- break;
- rb_node = rb_node->rb_left;
- } else
- rb_node = rb_node->rb_right;
- }
+/**
+ * find_vma() - Find the VMA for a given address, or the next VMA.
+ * @mm: The mm_struct to check
+ * @addr: The address
+ *
+ * Returns: The VMA associated with addr, or the next VMA.
+ * May return %NULL in the case of no VMA at addr or above.
+ */
+struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long index = addr;
- if (vma)
- vmacache_update(addr, vma);
- return vma;
+ mmap_assert_locked(mm);
+ return mt_find(&mm->mm_mt, &index, ULONG_MAX);
}
-
EXPORT_SYMBOL(find_vma);
-/*
- * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
+/**
+ * find_vma_prev() - Find the VMA for a given address, or the next vma and
+ * set %pprev to the previous VMA, if any.
+ * @mm: The mm_struct to check
+ * @addr: The address
+ * @pprev: The pointer to set to the previous VMA
+ *
+ * Note that the RCU lock is not taken here since the external mmap_lock is
+ * held instead.
+ *
+ * Returns: The VMA associated with @addr, or the next vma.
+ * May return %NULL in the case of no vma at addr or above.
*/
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct **pprev)
{
struct vm_area_struct *vma;
+ MA_STATE(mas, &mm->mm_mt, addr, addr);
- vma = find_vma(mm, addr);
- if (vma) {
- *pprev = vma->vm_prev;
- } else {
- struct rb_node *rb_node = rb_last(&mm->mm_rb);
-
- *pprev = rb_node ? rb_entry(rb_node, struct vm_area_struct, vm_rb) : NULL;
- }
+ vma = mas_walk(&mas);
+ *pprev = mas_prev(&mas, 0);
+ if (!vma)
+ vma = mas_next(&mas, ULONG_MAX);
return vma;
}
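To make the lookup semantics concrete, a small sketch (hypothetical helper, assumed to run under mmap_lock): find_vma_intersection() reports whether anything overlaps a range, while find_vma_prev() returns the VMA containing @addr if one exists, otherwise the next higher VMA, and always reports the preceding VMA through @pprev:

/* Illustrative sketch: inspect the VMAs around an address range. */
static void example_lookup(struct mm_struct *mm, unsigned long addr,
			   unsigned long end)
{
	struct vm_area_struct *vma, *prev;

	mmap_read_lock(mm);
	if (!find_vma_intersection(mm, addr, end))
		pr_info("[%lx, %lx) is entirely unmapped\n", addr, end);

	vma = find_vma_prev(mm, addr, &prev);
	if (vma && addr >= vma->vm_start)
		pr_info("addr is mapped by [%lx, %lx)\n",
			vma->vm_start, vma->vm_end);
	else
		pr_info("addr is in a gap; previous VMA ends at %lx\n",
			prev ? prev->vm_end : 0);
	mmap_read_unlock(mm);
}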
@@ -2354,6 +1912,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
struct vm_area_struct *next;
unsigned long gap_addr;
int error = 0;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
@@ -2371,16 +1930,21 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
if (gap_addr < address || gap_addr > TASK_SIZE)
gap_addr = TASK_SIZE;
- next = vma->vm_next;
- if (next && next->vm_start < gap_addr && vma_is_accessible(next)) {
+ next = find_vma_intersection(mm, vma->vm_end, gap_addr);
+ if (next && vma_is_accessible(next)) {
if (!(next->vm_flags & VM_GROWSUP))
return -ENOMEM;
/* Check that both stack segments have the same anon_vma? */
}
+ if (mas_preallocate(&mas, vma, GFP_KERNEL))
+ return -ENOMEM;
+
/* We must make sure the anon_vma is allocated. */
- if (unlikely(anon_vma_prepare(vma)))
+ if (unlikely(anon_vma_prepare(vma))) {
+ mas_destroy(&mas);
return -ENOMEM;
+ }
/*
* vma->vm_start/vm_end cannot change under us because the caller
@@ -2401,15 +1965,13 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
error = acct_stack_growth(vma, size, grow);
if (!error) {
/*
- * vma_gap_update() doesn't support concurrent
- * updates, but we only hold a shared mmap_lock
- * lock here, so we need to protect against
- * concurrent vma expansions.
- * anon_vma_lock_write() doesn't help here, as
- * we don't guarantee that all growable vmas
- * in a mm share the same root anon vma.
- * So, we reuse mm->page_table_lock to guard
- * against concurrent vma expansions.
+ * We only hold a shared mmap_lock lock here, so
+ * we need to protect against concurrent vma
+ * expansions. anon_vma_lock_write() doesn't
+ * help here, as we don't guarantee that all
+ * growable vmas in a mm share the same root
+ * anon vma. So, we reuse mm->page_table_lock
+ * to guard against concurrent vma expansions.
*/
spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
@@ -2417,11 +1979,9 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
vm_stat_account(mm, vma->vm_flags, grow);
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_end = address;
+ /* Overwrite old entry in mtree. */
+ vma_mas_store(vma, &mas);
anon_vma_interval_tree_post_update_vma(vma);
- if (vma->vm_next)
- vma_gap_update(vma->vm_next);
- else
- mm->highest_vm_end = vm_end_gap(vma);
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
@@ -2430,7 +1990,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
}
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma(vma, vma->vm_flags);
- validate_mm(mm);
+ mas_destroy(&mas);
return error;
}
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -2438,10 +1998,10 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
/*
* vma is the first one with address < vma->vm_start. Have to extend vma.
*/
-int expand_downwards(struct vm_area_struct *vma,
- unsigned long address)
+int expand_downwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
+ MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
struct vm_area_struct *prev;
int error = 0;
@@ -2450,7 +2010,7 @@ int expand_downwards(struct vm_area_struct *vma,
return -EPERM;
/* Enforce stack_guard_gap */
- prev = vma->vm_prev;
+ prev = mas_prev(&mas, 0);
/* Check that both stack segments have the same anon_vma? */
if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
vma_is_accessible(prev)) {
@@ -2458,9 +2018,14 @@ int expand_downwards(struct vm_area_struct *vma,
return -ENOMEM;
}
+ if (mas_preallocate(&mas, vma, GFP_KERNEL))
+ return -ENOMEM;
+
/* We must make sure the anon_vma is allocated. */
- if (unlikely(anon_vma_prepare(vma)))
+ if (unlikely(anon_vma_prepare(vma))) {
+ mas_destroy(&mas);
return -ENOMEM;
+ }
/*
* vma->vm_start/vm_end cannot change under us because the caller
@@ -2481,15 +2046,13 @@ int expand_downwards(struct vm_area_struct *vma,
error = acct_stack_growth(vma, size, grow);
if (!error) {
/*
- * vma_gap_update() doesn't support concurrent
- * updates, but we only hold a shared mmap_lock
- * lock here, so we need to protect against
- * concurrent vma expansions.
- * anon_vma_lock_write() doesn't help here, as
- * we don't guarantee that all growable vmas
- * in a mm share the same root anon vma.
- * So, we reuse mm->page_table_lock to guard
- * against concurrent vma expansions.
+ * We only hold a shared mmap_lock lock here, so
+ * we need to protect against concurrent vma
+ * expansions. anon_vma_lock_write() doesn't
+ * help here, as we don't guarantee that all
+ * growable vmas in a mm share the same root
+ * anon vma. So, we reuse mm->page_table_lock
+ * to guard against concurrent vma expansions.
*/
spin_lock(&mm->page_table_lock);
if (vma->vm_flags & VM_LOCKED)
@@ -2498,8 +2061,9 @@ int expand_downwards(struct vm_area_struct *vma,
anon_vma_interval_tree_pre_update_vma(vma);
vma->vm_start = address;
vma->vm_pgoff -= grow;
+ /* Overwrite old entry in mtree. */
+ vma_mas_store(vma, &mas);
anon_vma_interval_tree_post_update_vma(vma);
- vma_gap_update(vma);
spin_unlock(&mm->page_table_lock);
perf_event_mmap(vma);
@@ -2508,7 +2072,7 @@ int expand_downwards(struct vm_area_struct *vma,
}
anon_vma_unlock_write(vma->anon_vma);
khugepaged_enter_vma(vma, vma->vm_flags);
- validate_mm(mm);
+ mas_destroy(&mas);
return error;
}
@@ -2581,25 +2145,26 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
EXPORT_SYMBOL_GPL(find_extend_vma);
/*
- * Ok - we have the memory areas we should free on the vma list,
- * so release them, and do the vma updates.
+ * Ok - we have the memory areas we should free on a maple tree, so release them,
+ * and do the vma updates.
*
* Called with the mm semaphore held.
*/
-static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
+static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
{
unsigned long nr_accounted = 0;
+ struct vm_area_struct *vma;
/* Update high watermark before we lower total_vm */
update_hiwater_vm(mm);
- do {
+ mas_for_each(mas, vma, ULONG_MAX) {
long nrpages = vma_pages(vma);
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += nrpages;
vm_stat_account(mm, vma->vm_flags, -nrpages);
- vma = remove_vma(vma);
- } while (vma);
+ remove_vma(vma);
+ }
vm_unacct_memory(nr_accounted);
validate_mm(mm);
}
@@ -2609,67 +2174,23 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
*
* Called with the mm semaphore held.
*/
-static void unmap_region(struct mm_struct *mm,
+static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
struct vm_area_struct *vma, struct vm_area_struct *prev,
+ struct vm_area_struct *next,
unsigned long start, unsigned long end)
{
- struct vm_area_struct *next = vma_next(mm, prev);
struct mmu_gather tlb;
lru_add_drain();
tlb_gather_mmu(&tlb, mm);
update_hiwater_rss(mm);
- unmap_vmas(&tlb, vma, start, end);
- free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
+ unmap_vmas(&tlb, mt, vma, start, end);
+ free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
next ? next->vm_start : USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb);
}
/*
- * Create a list of vma's touched by the unmap, removing them from the mm's
- * vma list as we go..
- */
-static bool
-detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev, unsigned long end)
-{
- struct vm_area_struct **insertion_point;
- struct vm_area_struct *tail_vma = NULL;
-
- insertion_point = (prev ? &prev->vm_next : &mm->mmap);
- vma->vm_prev = NULL;
- do {
- vma_rb_erase(vma, &mm->mm_rb);
- if (vma->vm_flags & VM_LOCKED)
- mm->locked_vm -= vma_pages(vma);
- mm->map_count--;
- tail_vma = vma;
- vma = vma->vm_next;
- } while (vma && vma->vm_start < end);
- *insertion_point = vma;
- if (vma) {
- vma->vm_prev = prev;
- vma_gap_update(vma);
- } else
- mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
- tail_vma->vm_next = NULL;
-
- /* Kill the cache */
- vmacache_invalidate(mm);
-
- /*
- * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
- * VM_GROWSUP VMA. Such VMAs can change their size under
- * down_read(mmap_lock) and collide with the VMA we are about to unmap.
- */
- if (vma && (vma->vm_flags & VM_GROWSDOWN))
- return false;
- if (prev && (prev->vm_flags & VM_GROWSUP))
- return false;
- return true;
-}
-
-/*
* __split_vma() bypasses sysctl_max_map_count checking. We use this where it
* has already been checked or doesn't make sense to fail.
*/
@@ -2678,6 +2199,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
{
struct vm_area_struct *new;
int err;
+ validate_mm_mt(mm);
if (vma->vm_ops && vma->vm_ops->may_split) {
err = vma->vm_ops->may_split(vma, addr);
@@ -2720,6 +2242,9 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
if (!err)
return 0;
+ /* Avoid vm accounting in close() operation */
+ new->vm_start = new->vm_end;
+ new->vm_pgoff = 0;
/* Clean everything up if vma_adjust failed. */
if (new->vm_ops && new->vm_ops->close)
new->vm_ops->close(new);
@@ -2730,6 +2255,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
mpol_put(vma_policy(new));
out_free_vma:
vm_area_free(new);
+ validate_mm_mt(mm);
return err;
}
@@ -2746,38 +2272,48 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
return __split_vma(mm, vma, addr, new_below);
}
-/* Munmap is split into 2 main parts -- this part which finds
- * what needs doing, and the areas themselves, which do the
- * work. This now handles partial unmappings.
- * Jeremy Fitzhardinge <jeremy@goop.org>
- */
-int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
- struct list_head *uf, bool downgrade)
+static inline int munmap_sidetree(struct vm_area_struct *vma,
+ struct ma_state *mas_detach)
{
- unsigned long end;
- struct vm_area_struct *vma, *prev, *last;
-
- if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
- return -EINVAL;
+ mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
+ if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
+ return -ENOMEM;
- len = PAGE_ALIGN(len);
- end = start + len;
- if (len == 0)
- return -EINVAL;
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm -= vma_pages(vma);
- /*
- * arch_unmap() might do unmaps itself. It must be called
- * and finish any rbtree manipulation before this code
- * runs and also starts to manipulate the rbtree.
- */
- arch_unmap(mm, start, end);
+ return 0;
+}
- /* Find the first overlapping VMA where start < vma->vm_end */
- vma = find_vma_intersection(mm, start, end);
- if (!vma)
- return 0;
- prev = vma->vm_prev;
+/*
+ * do_mas_align_munmap() - munmap the aligned region from @start to @end.
+ * @mas: The maple_state, ideally set up to alter the correct tree location.
+ * @vma: The starting vm_area_struct
+ * @mm: The mm_struct
+ * @start: The aligned start address to munmap.
+ * @end: The aligned end address to munmap.
+ * @uf: The userfaultfd list_head
+ * @downgrade: Set to true to attempt a write downgrade of the mmap_sem
+ *
+ * If @downgrade is true, check return code for potential release of the lock.
+ */
+static int
+do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, struct list_head *uf, bool downgrade)
+{
+ struct vm_area_struct *prev, *next = NULL;
+ struct maple_tree mt_detach;
+ int count = 0;
+ int error = -ENOMEM;
+ MA_STATE(mas_detach, &mt_detach, 0, 0);
+ mt_init_flags(&mt_detach, MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&mt_detach, &mm->mmap_lock);
+
+ if (mas_preallocate(mas, vma, GFP_KERNEL))
+ return -ENOMEM;
+ mas->last = end - 1;
/*
* If we need to split any vma, do it now to save pain later.
*
@@ -2785,8 +2321,9 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
* unmapped vm_area_struct will remain in use: so lower split_vma
* places tmp vma above, and higher split_vma places tmp vma below.
*/
+
+ /* Does it split the first one? */
if (start > vma->vm_start) {
- int error;
/*
* Make sure that map_count on return from munmap() will
@@ -2794,22 +2331,61 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
* its limit temporarily, to help free resources as expected.
*/
if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
- return -ENOMEM;
+ goto map_count_exceeded;
+ /*
+ * mas_pause() is not needed since mas->index needs to be set
+ * differently than vma->vm_end anyways.
+ */
error = __split_vma(mm, vma, start, 0);
if (error)
- return error;
- prev = vma;
+ goto start_split_failed;
+
+ mas_set(mas, start);
+ vma = mas_walk(mas);
}
- /* Does it split the last one? */
- last = find_vma(mm, end);
- if (last && end > last->vm_start) {
- int error = __split_vma(mm, last, end, 1);
+ prev = mas_prev(mas, 0);
+ if (unlikely(!prev))
+ mas_set(mas, start);
+
+ /*
+ * Detach a range of VMAs from the mm. Using next as a temp variable as
+ * it is always overwritten.
+ */
+ mas_for_each(mas, next, end - 1) {
+ /* Does it split the end? */
+ if (next->vm_end > end) {
+ struct vm_area_struct *split;
+
+ error = __split_vma(mm, next, end, 1);
+ if (error)
+ goto end_split_failed;
+
+ mas_set(mas, end);
+ split = mas_prev(mas, 0);
+ error = munmap_sidetree(split, &mas_detach);
+ if (error)
+ goto munmap_sidetree_failed;
+
+ count++;
+ if (vma == next)
+ vma = split;
+ break;
+ }
+ error = munmap_sidetree(next, &mas_detach);
if (error)
- return error;
+ goto munmap_sidetree_failed;
+
+ count++;
+#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
+ BUG_ON(next->vm_start < start);
+ BUG_ON(next->vm_start > end);
+#endif
}
- vma = vma_next(mm, prev);
+
+ if (!next)
+ next = mas_next(mas, ULONG_MAX);
if (unlikely(uf)) {
/*
@@ -2821,30 +2397,366 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
* split, despite we could. This is unlikely enough
* failure that it's not worth optimizing it for.
*/
- int error = userfaultfd_unmap_prep(vma, start, end, uf);
+ error = userfaultfd_unmap_prep(mm, start, end, uf);
+
if (error)
- return error;
+ goto userfaultfd_error;
+ }
+
+ /* Point of no return */
+ mas_set_range(mas, start, end - 1);
+#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
+ /* Make sure no VMAs are about to be lost. */
+ {
+ MA_STATE(test, &mt_detach, start, end - 1);
+ struct vm_area_struct *vma_mas, *vma_test;
+ int test_count = 0;
+
+ rcu_read_lock();
+ vma_test = mas_find(&test, end - 1);
+ mas_for_each(mas, vma_mas, end - 1) {
+ BUG_ON(vma_mas != vma_test);
+ test_count++;
+ vma_test = mas_next(&test, end - 1);
+ }
+ rcu_read_unlock();
+ BUG_ON(count != test_count);
+ mas_set_range(mas, start, end - 1);
+ }
+#endif
+ mas_store_prealloc(mas, NULL);
+ mm->map_count -= count;
+ /*
+ * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
+ * VM_GROWSUP VMA. Such VMAs can change their size under
+ * down_read(mmap_lock) and collide with the VMA we are about to unmap.
+ */
+ if (downgrade) {
+ if (next && (next->vm_flags & VM_GROWSDOWN))
+ downgrade = false;
+ else if (prev && (prev->vm_flags & VM_GROWSUP))
+ downgrade = false;
+ else
+ mmap_write_downgrade(mm);
}
- /* Detach vmas from rbtree */
- if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
- downgrade = false;
+ unmap_region(mm, &mt_detach, vma, prev, next, start, end);
+ /* Statistics and freeing VMAs */
+ mas_set(&mas_detach, start);
+ remove_mt(mm, &mas_detach);
+ __mt_destroy(&mt_detach);
- if (downgrade)
- mmap_write_downgrade(mm);
- unmap_region(mm, vma, prev, start, end);
+ validate_mm(mm);
+ return downgrade ? 1 : 0;
- /* Fix up all other VM information */
- remove_vma_list(mm, vma);
+userfaultfd_error:
+munmap_sidetree_failed:
+end_split_failed:
+ __mt_destroy(&mt_detach);
+start_split_failed:
+map_count_exceeded:
+ mas_destroy(mas);
+ return error;
+}
- return downgrade ? 1 : 0;
+/*
+ * do_mas_munmap() - munmap a given range.
+ * @mas: The maple state
+ * @mm: The mm_struct
+ * @start: The start address to munmap
+ * @len: The length of the range to munmap
+ * @uf: The userfaultfd list_head
+ * @downgrade: set to true if the user wants to attempt to write_downgrade the
+ * mmap_sem
+ *
+ * This function takes a @mas that is either pointing to the previous VMA or set
+ * to MA_START and sets it up to remove the mapping(s). The @len will be
+ * aligned and any arch_unmap work will be performed.
+ *
+ * Returns: -EINVAL on failure, 1 on success and unlock, 0 otherwise.
+ */
+int do_mas_munmap(struct ma_state *mas, struct mm_struct *mm,
+ unsigned long start, size_t len, struct list_head *uf,
+ bool downgrade)
+{
+ unsigned long end;
+ struct vm_area_struct *vma;
+
+ if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
+ return -EINVAL;
+
+ end = start + PAGE_ALIGN(len);
+ if (end == start)
+ return -EINVAL;
+
+ /* arch_unmap() might do unmaps itself. */
+ arch_unmap(mm, start, end);
+
+ /* Find the first overlapping VMA */
+ vma = mas_find(mas, end - 1);
+ if (!vma)
+ return 0;
+
+ return do_mas_align_munmap(mas, vma, mm, start, end, uf, downgrade);
}
+/* do_munmap() - Wrapper function for non-maple-tree-aware do_munmap() calls.
+ * @mm: The mm_struct
+ * @start: The start address to munmap
+ * @len: The length to be munmapped.
+ * @uf: The userfaultfd list_head
+ */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
struct list_head *uf)
{
- return __do_munmap(mm, start, len, uf, false);
+ MA_STATE(mas, &mm->mm_mt, start, start);
+
+ return do_mas_munmap(&mas, mm, start, len, uf, false);
+}
+
+unsigned long mmap_region(struct file *file, unsigned long addr,
+ unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
+ struct list_head *uf)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma = NULL;
+ struct vm_area_struct *next, *prev, *merge;
+ pgoff_t pglen = len >> PAGE_SHIFT;
+ unsigned long charged = 0;
+ unsigned long end = addr + len;
+ unsigned long merge_start = addr, merge_end = end;
+ pgoff_t vm_pgoff;
+ int error;
+ MA_STATE(mas, &mm->mm_mt, addr, end - 1);
+
+ /* Check against address space limit. */
+ if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
+ unsigned long nr_pages;
+
+ /*
+ * MAP_FIXED may remove pages of mappings that intersects with
+ * requested mapping. Account for the pages it would unmap.
+ */
+ nr_pages = count_vma_pages_range(mm, addr, end);
+
+ if (!may_expand_vm(mm, vm_flags,
+ (len >> PAGE_SHIFT) - nr_pages))
+ return -ENOMEM;
+ }
+
+ /* Unmap any existing mapping in the area */
+ if (do_mas_munmap(&mas, mm, addr, len, uf, false))
+ return -ENOMEM;
+
+ /*
+ * Private writable mapping: check memory availability
+ */
+ if (accountable_mapping(file, vm_flags)) {
+ charged = len >> PAGE_SHIFT;
+ if (security_vm_enough_memory_mm(mm, charged))
+ return -ENOMEM;
+ vm_flags |= VM_ACCOUNT;
+ }
+
+ next = mas_next(&mas, ULONG_MAX);
+ prev = mas_prev(&mas, 0);
+ if (vm_flags & VM_SPECIAL)
+ goto cannot_expand;
+
+ /* Attempt to expand an old mapping */
+ /* Check next */
+ if (next && next->vm_start == end && !vma_policy(next) &&
+ can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
+ NULL_VM_UFFD_CTX, NULL)) {
+ merge_end = next->vm_end;
+ vma = next;
+ vm_pgoff = next->vm_pgoff - pglen;
+ }
+
+ /* Check prev */
+ if (prev && prev->vm_end == addr && !vma_policy(prev) &&
+ (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
+ pgoff, vma->vm_userfaultfd_ctx, NULL) :
+ can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
+ NULL_VM_UFFD_CTX, NULL))) {
+ merge_start = prev->vm_start;
+ vma = prev;
+ vm_pgoff = prev->vm_pgoff;
+ }
+
+
+ /* Actually expand, if possible */
+ if (vma &&
+ !vma_expand(&mas, vma, merge_start, merge_end, vm_pgoff, next)) {
+ khugepaged_enter_vma(vma, vm_flags);
+ goto expanded;
+ }
+
+ mas.index = addr;
+ mas.last = end - 1;
+cannot_expand:
+ /*
+ * Determine the object being mapped and call the appropriate
+ * specific mapper. The address has already been validated, but
+ * not unmapped, though the maps have been removed from the list.
+ */
+ vma = vm_area_alloc(mm);
+ if (!vma) {
+ error = -ENOMEM;
+ goto unacct_error;
+ }
+
+ vma->vm_start = addr;
+ vma->vm_end = end;
+ vma->vm_flags = vm_flags;
+ vma->vm_page_prot = vm_get_page_prot(vm_flags);
+ vma->vm_pgoff = pgoff;
+
+ if (file) {
+ if (vm_flags & VM_SHARED) {
+ error = mapping_map_writable(file->f_mapping);
+ if (error)
+ goto free_vma;
+ }
+
+ vma->vm_file = get_file(file);
+ error = call_mmap(file, vma);
+ if (error)
+ goto unmap_and_free_vma;
+
+ /* Can addr have changed??
+ *
+ * Answer: Yes, several device drivers can do it in their
+ * f_op->mmap method. -DaveM
+ */
+ WARN_ON_ONCE(addr != vma->vm_start);
+
+ addr = vma->vm_start;
+ mas_reset(&mas);
+
+ /*
+ * If vm_flags changed after call_mmap(), we should try merge
+ * vma again as we may succeed this time.
+ */
+ if (unlikely(vm_flags != vma->vm_flags && prev)) {
+ merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
+ NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
+ if (merge) {
+ /*
+ * ->mmap() can change vma->vm_file and fput
+ * the original file. So fput the vma->vm_file
+ * here or we would add an extra fput for file
+ * and cause general protection fault
+ * ultimately.
+ */
+ fput(vma->vm_file);
+ vm_area_free(vma);
+ vma = merge;
+ /* Update vm_flags to pick up the change. */
+ addr = vma->vm_start;
+ vm_flags = vma->vm_flags;
+ goto unmap_writable;
+ }
+ }
+
+ vm_flags = vma->vm_flags;
+ } else if (vm_flags & VM_SHARED) {
+ error = shmem_zero_setup(vma);
+ if (error)
+ goto free_vma;
+ } else {
+ vma_set_anonymous(vma);
+ }
+
+ /* Allow architectures to sanity-check the vm_flags */
+ if (!arch_validate_flags(vma->vm_flags)) {
+ error = -EINVAL;
+ if (file)
+ goto unmap_and_free_vma;
+ else
+ goto free_vma;
+ }
+
+ if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
+ error = -ENOMEM;
+ if (file)
+ goto unmap_and_free_vma;
+ else
+ goto free_vma;
+ }
+
+ if (vma->vm_file)
+ i_mmap_lock_write(vma->vm_file->f_mapping);
+
+ vma_mas_store(vma, &mas);
+ mm->map_count++;
+ if (vma->vm_file) {
+ if (vma->vm_flags & VM_SHARED)
+ mapping_allow_writable(vma->vm_file->f_mapping);
+
+ flush_dcache_mmap_lock(vma->vm_file->f_mapping);
+ vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
+ flush_dcache_mmap_unlock(vma->vm_file->f_mapping);
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
+ }
+
+ /*
+ * vma_merge() calls khugepaged_enter_vma() as well, so the call
+ * below covers the non-merge case.
+ */
+ khugepaged_enter_vma(vma, vma->vm_flags);
+
+ /* Once vma denies write, undo our temporary denial count */
+unmap_writable:
+ if (file && vm_flags & VM_SHARED)
+ mapping_unmap_writable(file->f_mapping);
+ file = vma->vm_file;
+expanded:
+ perf_event_mmap(vma);
+
+ vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
+ if (vm_flags & VM_LOCKED) {
+ if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
+ is_vm_hugetlb_page(vma) ||
+ vma == get_gate_vma(current->mm))
+ vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
+ else
+ mm->locked_vm += (len >> PAGE_SHIFT);
+ }
+
+ if (file)
+ uprobe_mmap(vma);
+
+ /*
+ * A new (or expanded) vma always gets the soft-dirty status.
+ * Otherwise the user-space soft-dirty page tracker won't
+ * be able to distinguish the case where a vma is unmapped
+ * and then a new one is mapped in place (which must be treated
+ * as a completely new data area).
+ */
+ vma->vm_flags |= VM_SOFTDIRTY;
+
+ vma_set_page_prot(vma);
+
+ validate_mm(mm);
+ return addr;
+
+unmap_and_free_vma:
+ fput(vma->vm_file);
+ vma->vm_file = NULL;
+
+ /* Undo any partial mapping done by a device driver. */
+ unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end);
+ if (vm_flags & VM_SHARED)
+ mapping_unmap_writable(file->f_mapping);
+free_vma:
+ vm_area_free(vma);
+unacct_error:
+ if (charged)
+ vm_unacct_memory(charged);
+ validate_mm(mm);
+ return error;
}
static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
@@ -2852,11 +2764,12 @@ static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
int ret;
struct mm_struct *mm = current->mm;
LIST_HEAD(uf);
+ MA_STATE(mas, &mm->mm_mt, start, start);
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = __do_munmap(mm, start, len, &uf, downgrade);
+ ret = do_mas_munmap(&mas, mm, start, len, &uf, downgrade);
/*
* Returning 1 indicates mmap_lock is downgraded.
* But 1 is not legal return value of vm_munmap() and munmap(), reset
@@ -2922,11 +2835,12 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
goto out;
if (start + size > vma->vm_end) {
- struct vm_area_struct *next;
+ VMA_ITERATOR(vmi, mm, vma->vm_end);
+ struct vm_area_struct *next, *prev = vma;
- for (next = vma->vm_next; next; next = next->vm_next) {
+ for_each_vma_range(vmi, next, start + size) {
/* hole between vmas ? */
- if (next->vm_start != next->vm_prev->vm_end)
+ if (next->vm_start != prev->vm_end)
goto out;
if (next->vm_file != vma->vm_file)
@@ -2935,8 +2849,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
if (next->vm_flags != vma->vm_flags)
goto out;
- if (start + size <= next->vm_end)
- break;
+ prev = next;
}
if (!next)
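The VMA_ITERATOR/for_each_vma_range() pattern used above is the general replacement for walking the old vm_next chain; a minimal hedged sketch of the idiom (the helper name is hypothetical, and the caller is assumed to hold mmap_lock):

/* Illustrative sketch: visit every VMA that overlaps [start, end). */
static void example_walk_range(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	VMA_ITERATOR(vmi, mm, start);
	struct vm_area_struct *vma;

	mmap_assert_locked(mm);
	for_each_vma_range(vmi, vma, end)
		pr_info("vma [%lx, %lx) flags %lx\n",
			vma->vm_start, vma->vm_end, vma->vm_flags);
}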
@@ -2966,37 +2879,53 @@ out:
}
/*
- * this is really a simplified "do_mmap". it only handles
- * anonymous maps. eventually we may be able to do some
- * brk-specific accounting here.
+ * do_brk_munmap() - Unmap a partial vma.
+ * @mas: The maple tree state.
+ * @vma: The vma to be modified
+ * @newbrk: The start of the address to unmap
+ * @oldbrk: The end of the address to unmap
+ * @uf: The userfaultfd list_head
+ *
+ * Returns: 1 on success.
+ * Unmaps a partial VMA mapping. Does not handle alignment; downgrades the
+ * lock if possible.
*/
-static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
+static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
+ unsigned long newbrk, unsigned long oldbrk,
+ struct list_head *uf)
{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma, *prev;
- struct rb_node **rb_link, *rb_parent;
- pgoff_t pgoff = addr >> PAGE_SHIFT;
- int error;
- unsigned long mapped_addr;
-
- /* Until we need other flags, refuse anything except VM_EXEC. */
- if ((flags & (~VM_EXEC)) != 0)
- return -EINVAL;
- flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
-
- mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
- if (IS_ERR_VALUE(mapped_addr))
- return mapped_addr;
+ struct mm_struct *mm = vma->vm_mm;
+ int ret;
- error = mlock_future_check(mm, mm->def_flags, len);
- if (error)
- return error;
+ arch_unmap(mm, newbrk, oldbrk);
+ ret = do_mas_align_munmap(mas, vma, mm, newbrk, oldbrk, uf, true);
+ validate_mm_mt(mm);
+ return ret;
+}
- /* Clear old maps, set up prev, rb_link, rb_parent, and uf */
- if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
- return -ENOMEM;
+/*
+ * do_brk_flags() - Increase the brk vma if the flags match.
+ * @mas: The maple tree state.
+ * @addr: The start address
+ * @len: The length of the increase
+ * @vma: The vma to expand, or NULL if a new VMA should be created
+ * @flags: The VMA Flags
+ *
+ * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
+ * do not match then create a new anonymous VMA. Eventually we may be able to
+ * do some brk-specific accounting here.
+ */
+static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long len, unsigned long flags)
+{
+ struct mm_struct *mm = current->mm;
- /* Check against address space limits *after* clearing old maps... */
+ validate_mm_mt(mm);
+ /*
+ * Check against address space limits by the changed size
+ * Note: This happens *after* clearing old mappings in some code paths.
+ */
+ flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
return -ENOMEM;
@@ -3006,28 +2935,49 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
return -ENOMEM;
- /* Can we just expand an old private anonymous mapping? */
- vma = vma_merge(mm, prev, addr, addr + len, flags,
- NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
- if (vma)
- goto out;
-
/*
- * create a vma struct for an anonymous mapping
+ * Expand the existing vma if possible; Note that singular lists do not
+ * occur after forking, so the expand will only happen on new VMAs.
*/
- vma = vm_area_alloc(mm);
- if (!vma) {
- vm_unacct_memory(len >> PAGE_SHIFT);
- return -ENOMEM;
+ if (vma &&
+ (!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) &&
+ ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
+ mas->index = vma->vm_start;
+ mas->last = addr + len - 1;
+ vma_adjust_trans_huge(vma, addr, addr + len, 0);
+ if (vma->anon_vma) {
+ anon_vma_lock_write(vma->anon_vma);
+ anon_vma_interval_tree_pre_update_vma(vma);
+ }
+ vma->vm_end = addr + len;
+ vma->vm_flags |= VM_SOFTDIRTY;
+ if (mas_store_gfp(mas, vma, GFP_KERNEL))
+ goto mas_expand_failed;
+
+ if (vma->anon_vma) {
+ anon_vma_interval_tree_post_update_vma(vma);
+ anon_vma_unlock_write(vma->anon_vma);
+ }
+ khugepaged_enter_vma(vma, flags);
+ goto out;
}
+ /* create a vma struct for an anonymous mapping */
+ vma = vm_area_alloc(mm);
+ if (!vma)
+ goto vma_alloc_fail;
+
vma_set_anonymous(vma);
vma->vm_start = addr;
vma->vm_end = addr + len;
- vma->vm_pgoff = pgoff;
+ vma->vm_pgoff = addr >> PAGE_SHIFT;
vma->vm_flags = flags;
vma->vm_page_prot = vm_get_page_prot(flags);
- vma_link(mm, vma, prev, rb_link, rb_parent);
+ mas_set_range(mas, vma->vm_start, addr + len - 1);
+ if (mas_store_gfp(mas, vma, GFP_KERNEL))
+ goto mas_store_fail;
+
+ mm->map_count++;
out:
perf_event_mmap(vma);
mm->total_vm += len >> PAGE_SHIFT;
@@ -3035,16 +2985,32 @@ out:
if (flags & VM_LOCKED)
mm->locked_vm += (len >> PAGE_SHIFT);
vma->vm_flags |= VM_SOFTDIRTY;
+ validate_mm(mm);
return 0;
+
+mas_store_fail:
+ vm_area_free(vma);
+vma_alloc_fail:
+ vm_unacct_memory(len >> PAGE_SHIFT);
+ return -ENOMEM;
+
+mas_expand_failed:
+ if (vma->anon_vma) {
+ anon_vma_interval_tree_post_update_vma(vma);
+ anon_vma_unlock_write(vma->anon_vma);
+ }
+ return -ENOMEM;
}
int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
{
struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma = NULL;
unsigned long len;
int ret;
bool populate;
LIST_HEAD(uf);
+ MA_STATE(mas, &mm->mm_mt, addr, addr);
len = PAGE_ALIGN(request);
if (len < request)
@@ -3055,13 +3021,36 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
if (mmap_write_lock_killable(mm))
return -EINTR;
- ret = do_brk_flags(addr, len, flags, &uf);
+ /* Until we need other flags, refuse anything except VM_EXEC. */
+ if ((flags & (~VM_EXEC)) != 0)
+ return -EINVAL;
+
+ ret = check_brk_limits(addr, len);
+ if (ret)
+ goto limits_failed;
+
+ ret = do_mas_munmap(&mas, mm, addr, len, &uf, 0);
+ if (ret)
+ goto munmap_failed;
+
+ vma = mas_prev(&mas, 0);
+ if (!vma || vma->vm_end != addr || vma_policy(vma) ||
+ !can_vma_merge_after(vma, flags, NULL, NULL,
+ addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL))
+ vma = NULL;
+
+ ret = do_brk_flags(&mas, vma, addr, len, flags);
populate = ((mm->def_flags & VM_LOCKED) != 0);
mmap_write_unlock(mm);
userfaultfd_unmap_complete(mm, &uf);
if (populate && !ret)
mm_populate(addr, len);
return ret;
+
+munmap_failed:
+limits_failed:
+ mmap_write_unlock(mm);
+ return ret;
}
EXPORT_SYMBOL(vm_brk_flags);
@@ -3077,34 +3066,19 @@ void exit_mmap(struct mm_struct *mm)
struct mmu_gather tlb;
struct vm_area_struct *vma;
unsigned long nr_accounted = 0;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
+ int count = 0;
/* mm's last user has gone, and it's about to be pulled down */
mmu_notifier_release(mm);
- if (unlikely(mm_is_oom_victim(mm))) {
- /*
- * Manually reap the mm to free as much memory as possible.
- * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
- * this mm from further consideration. Taking mm->mmap_lock for
- * write after setting MMF_OOM_SKIP will guarantee that the oom
- * reaper will not run on this mm again after mmap_lock is
- * dropped.
- *
- * Nothing can be holding mm->mmap_lock here and the above call
- * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
- * __oom_reap_task_mm() will not block.
- */
- (void)__oom_reap_task_mm(mm);
- set_bit(MMF_OOM_SKIP, &mm->flags);
- }
-
- mmap_write_lock(mm);
+ mmap_read_lock(mm);
arch_exit_mmap(mm);
- vma = mm->mmap;
+ vma = mas_find(&mas, ULONG_MAX);
if (!vma) {
/* Can happen if dup_mmap() received an OOM */
- mmap_write_unlock(mm);
+ mmap_read_unlock(mm);
return;
}
@@ -3112,19 +3086,37 @@ void exit_mmap(struct mm_struct *mm)
flush_cache_mm(mm);
tlb_gather_mmu_fullmm(&tlb, mm);
/* update_hiwater_rss(mm) here? but nobody should be looking */
- /* Use -1 here to ensure all VMAs in the mm are unmapped */
- unmap_vmas(&tlb, vma, 0, -1);
- free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
+ /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
+ unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX);
+ mmap_read_unlock(mm);
+
+ /*
+ * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
+ * because the memory has been already freed.
+ */
+ set_bit(MMF_OOM_SKIP, &mm->flags);
+ mmap_write_lock(mm);
+ free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
+ USER_PGTABLES_CEILING);
tlb_finish_mmu(&tlb);
- /* Walk the list again, actually closing and freeing it. */
- while (vma) {
+ /*
+ * Walk the list again, actually closing and freeing it, with preemption
+ * enabled, without holding any MM locks besides the unreachable
+ * mmap_write_lock.
+ */
+ do {
if (vma->vm_flags & VM_ACCOUNT)
nr_accounted += vma_pages(vma);
- vma = remove_vma(vma);
+ remove_vma(vma);
+ count++;
cond_resched();
- }
- mm->mmap = NULL;
+ } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
+
+ BUG_ON(count != mm->map_count);
+
+ trace_exit_mmap(mm);
+ __mt_destroy(&mm->mm_mt);
mmap_write_unlock(mm);
vm_unacct_memory(nr_accounted);
}
@@ -3135,14 +3127,14 @@ void exit_mmap(struct mm_struct *mm)
*/
int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
- struct vm_area_struct *prev;
- struct rb_node **rb_link, *rb_parent;
+ unsigned long charged = vma_pages(vma);
+
- if (find_vma_links(mm, vma->vm_start, vma->vm_end,
- &prev, &rb_link, &rb_parent))
+ if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
return -ENOMEM;
+
if ((vma->vm_flags & VM_ACCOUNT) &&
- security_vm_enough_memory_mm(mm, vma_pages(vma)))
+ security_vm_enough_memory_mm(mm, charged))
return -ENOMEM;
/*
@@ -3162,7 +3154,11 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
}
- vma_link(mm, vma, prev, rb_link, rb_parent);
+ if (vma_link(mm, vma)) {
+ vm_unacct_memory(charged);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -3178,9 +3174,9 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
unsigned long vma_start = vma->vm_start;
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *new_vma, *prev;
- struct rb_node **rb_link, *rb_parent;
bool faulted_in_anon_vma = true;
+ validate_mm_mt(mm);
/*
* If anonymous vma has not yet been faulted, update new pgoff
* to match new location, to increase its chance of merging.
@@ -3190,8 +3186,10 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
faulted_in_anon_vma = false;
}
- if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
+ new_vma = find_vma_prev(mm, addr, &prev);
+ if (new_vma && new_vma->vm_start < addr + len)
return NULL; /* should never get here */
+
new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
vma->vm_userfaultfd_ctx, anon_vma_name(vma));
@@ -3232,16 +3230,22 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
get_file(new_vma->vm_file);
if (new_vma->vm_ops && new_vma->vm_ops->open)
new_vma->vm_ops->open(new_vma);
- vma_link(mm, new_vma, prev, rb_link, rb_parent);
+ if (vma_link(mm, new_vma))
+ goto out_vma_link;
*need_rmap_locks = false;
}
+ validate_mm_mt(mm);
return new_vma;
+out_vma_link:
+ if (new_vma->vm_ops && new_vma->vm_ops->close)
+ new_vma->vm_ops->close(new_vma);
out_free_mempol:
mpol_put(vma_policy(new_vma));
out_free_vma:
vm_area_free(new_vma);
out:
+ validate_mm_mt(mm);
return NULL;
}
@@ -3378,6 +3382,7 @@ static struct vm_area_struct *__install_special_mapping(
int ret;
struct vm_area_struct *vma;
+ validate_mm_mt(mm);
vma = vm_area_alloc(mm);
if (unlikely(vma == NULL))
return ERR_PTR(-ENOMEM);
@@ -3400,10 +3405,12 @@ static struct vm_area_struct *__install_special_mapping(
perf_event_mmap(vma);
+ validate_mm_mt(mm);
return vma;
out:
vm_area_free(vma);
+ validate_mm_mt(mm);
return ERR_PTR(ret);
}
@@ -3528,12 +3535,13 @@ int mm_take_all_locks(struct mm_struct *mm)
{
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
mmap_assert_write_locked(mm);
mutex_lock(&mm_all_locks_mutex);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ mas_for_each(&mas, vma, ULONG_MAX) {
if (signal_pending(current))
goto out_unlock;
if (vma->vm_file && vma->vm_file->f_mapping &&
@@ -3541,7 +3549,8 @@ int mm_take_all_locks(struct mm_struct *mm)
vm_lock_mapping(mm, vma->vm_file->f_mapping);
}
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ mas_set(&mas, 0);
+ mas_for_each(&mas, vma, ULONG_MAX) {
if (signal_pending(current))
goto out_unlock;
if (vma->vm_file && vma->vm_file->f_mapping &&
@@ -3549,7 +3558,8 @@ int mm_take_all_locks(struct mm_struct *mm)
vm_lock_mapping(mm, vma->vm_file->f_mapping);
}
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ mas_set(&mas, 0);
+ mas_for_each(&mas, vma, ULONG_MAX) {
if (signal_pending(current))
goto out_unlock;
if (vma->anon_vma)
@@ -3608,11 +3618,12 @@ void mm_drop_all_locks(struct mm_struct *mm)
{
struct vm_area_struct *vma;
struct anon_vma_chain *avc;
+ MA_STATE(mas, &mm->mm_mt, 0, 0);
mmap_assert_write_locked(mm);
BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ mas_for_each(&mas, vma, ULONG_MAX) {
if (vma->anon_vma)
list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
vm_unlock_anon_vma(avc->anon_vma);
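All of the mm->mmap list walks converted above reduce to the same maple state idiom; a hedged sketch of that pattern in isolation (the counting helper is purely illustrative and not part of this patch):

/* Illustrative sketch: count the VMAs in an mm with a maple state walk. */
static int example_count_vmas(struct mm_struct *mm)
{
	MA_STATE(mas, &mm->mm_mt, 0, 0);
	struct vm_area_struct *vma;
	int count = 0;

	mmap_assert_locked(mm);
	mas_for_each(&mas, vma, ULONG_MAX)
		count++;
	return count;
}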
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 0ae7571e35ab..68e1511be12d 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -88,6 +88,8 @@ void lruvec_init(struct lruvec *lruvec)
* Poison its list head, so that any operations on it would crash.
*/
list_del(&lruvec->lists[LRU_UNEVICTABLE]);
+
+ lru_gen_init_lruvec(lruvec);
}
#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index bc6bddd156ca..461dcbd4f21a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -31,6 +31,7 @@
#include <linux/pgtable.h>
#include <linux/sched/sysctl.h>
#include <linux/userfaultfd_k.h>
+#include <linux/memory-tiers.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
@@ -121,6 +122,7 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
if (prot_numa) {
struct page *page;
int nid;
+ bool toptier;
/* Avoid TLB flush if possible */
if (pte_protnone(oldpte))
@@ -150,14 +152,19 @@ static unsigned long change_pte_range(struct mmu_gather *tlb,
nid = page_to_nid(page);
if (target_node == nid)
continue;
+ toptier = node_is_toptier(nid);
/*
* Skip scanning top tier node if normal numa
* balancing is disabled
*/
if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) &&
- node_is_toptier(nid))
+ toptier)
continue;
+ if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING &&
+ !toptier)
+ xchg_page_access_time(page,
+ jiffies_to_msecs(jiffies));
}
oldpte = ptep_modify_prot_start(vma, addr, pte);
@@ -669,6 +676,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
(prot & PROT_READ);
struct mmu_gather tlb;
+ MA_STATE(mas, &current->mm->mm_mt, 0, 0);
start = untagged_addr(start);
@@ -700,7 +708,8 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
goto out;
- vma = find_vma(current->mm, start);
+ mas_set(&mas, start);
+ vma = mas_find(&mas, ULONG_MAX);
error = -ENOMEM;
if (!vma)
goto out;
@@ -726,7 +735,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
if (start > vma->vm_start)
prev = vma;
else
- prev = vma->vm_prev;
+ prev = mas_prev(&mas, 0);
tlb_gather_mmu(&tlb, current->mm);
for (nstart = start ; ; ) {
@@ -789,7 +798,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
if (nstart >= end)
break;
- vma = prev->vm_next;
+ vma = find_vma(current->mm, prev->vm_end);
if (!vma || vma->vm_start != nstart) {
error = -ENOMEM;
break;
diff --git a/mm/mremap.c b/mm/mremap.c
index b522cd0259a0..e465ffe279bb 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -9,6 +9,7 @@
*/
#include <linux/mm.h>
+#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
@@ -23,6 +24,7 @@
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/userfaultfd_k.h>
+#include <linux/mempolicy.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
@@ -716,7 +718,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
if (excess) {
vma->vm_flags |= VM_ACCOUNT;
if (split)
- vma->vm_next->vm_flags |= VM_ACCOUNT;
+ find_vma(mm, vma->vm_end)->vm_flags |= VM_ACCOUNT;
}
return new_addr;
@@ -866,9 +868,10 @@ out:
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
unsigned long end = vma->vm_end + delta;
+
if (end < vma->vm_end) /* overflow */
return 0;
- if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
+ if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
return 0;
if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
0, MAP_FIXED) & ~PAGE_MASK)
@@ -975,20 +978,23 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
/*
* Always allow a shrinking remap: that just unmaps
* the unnecessary pages..
- * __do_munmap does all the needed commit accounting, and
+ * do_mas_munmap does all the needed commit accounting, and
* downgrades mmap_lock to read if so directed.
*/
if (old_len >= new_len) {
int retval;
+ MA_STATE(mas, &mm->mm_mt, addr + new_len, addr + new_len);
- retval = __do_munmap(mm, addr+new_len, old_len - new_len,
- &uf_unmap, true);
- if (retval < 0 && old_len != new_len) {
- ret = retval;
- goto out;
+ retval = do_mas_munmap(&mas, mm, addr + new_len,
+ old_len - new_len, &uf_unmap, true);
/* Returning 1 indicates mmap_lock is downgraded to read. */
- } else if (retval == 1)
+ if (retval == 1) {
downgraded = true;
+ } else if (retval < 0 && old_len != new_len) {
+ ret = retval;
+ goto out;
+ }
+
ret = addr;
goto out;
}
@@ -1008,6 +1014,9 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
/* can we just expand the current mapping? */
if (vma_expandable(vma, new_len - old_len)) {
long pages = (new_len - old_len) >> PAGE_SHIFT;
+ unsigned long extension_start = addr + old_len;
+ unsigned long extension_end = addr + new_len;
+ pgoff_t extension_pgoff = vma->vm_pgoff + (old_len >> PAGE_SHIFT);
if (vma->vm_flags & VM_ACCOUNT) {
if (security_vm_enough_memory_mm(mm, pages)) {
@@ -1016,8 +1025,18 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
}
}
- if (vma_adjust(vma, vma->vm_start, addr + new_len,
- vma->vm_pgoff, NULL)) {
+ /*
+ * Function vma_merge() is called on the extension we are adding to
+ * the already existing vma, vma_merge() will merge this extension with
+ * the already existing vma (expand operation itself) and possibly also
+ * with the next vma if it becomes adjacent to the expanded vma and
+ * otherwise compatible.
+ */
+ vma = vma_merge(mm, vma, extension_start, extension_end,
+ vma->vm_flags, vma->anon_vma, vma->vm_file,
+ extension_pgoff, vma_policy(vma),
+ vma->vm_userfaultfd_ctx, anon_vma_name(vma));
+ if (!vma) {
vm_unacct_memory(pages);
ret = -ENOMEM;
goto out;
diff --git a/mm/msync.c b/mm/msync.c
index 137d1c104f3e..ac4c9bfea2e7 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -104,7 +104,7 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
error = 0;
goto out_unlock;
}
- vma = vma->vm_next;
+ vma = find_vma(mm, vma->vm_end);
}
}
out_unlock:
diff --git a/mm/nommu.c b/mm/nommu.c
index e819cbc21b39..214c70e1d059 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -19,7 +19,6 @@
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
-#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
@@ -545,26 +544,27 @@ static void put_nommu_region(struct vm_region *region)
__put_nommu_region(region);
}
-/*
- * add a VMA into a process's mm_struct in the appropriate place in the list
- * and tree and add to the address space's page tree also if not an anonymous
- * page
- * - should be called with mm->mmap_lock held writelocked
- */
-static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
+void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas)
{
- struct vm_area_struct *pvma, *prev;
- struct address_space *mapping;
- struct rb_node **p, *parent, *rb_prev;
+ mas_set_range(mas, vma->vm_start, vma->vm_end - 1);
+ mas_store_prealloc(mas, vma);
+}
- BUG_ON(!vma->vm_region);
+void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
+{
+ mas->index = vma->vm_start;
+ mas->last = vma->vm_end - 1;
+ mas_store_prealloc(mas, NULL);
+}
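A hedged sketch of the intended calling convention for these helpers, mirroring the expand paths elsewhere in this series: preallocate nodes first so the later store cannot fail, then overwrite the VMA's range in the tree (the function name and resize logic are illustrative only):

/* Illustrative sketch: grow a VMA and overwrite its entry in the maple tree. */
static int example_grow_vma(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long new_end)
{
	MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);

	if (mas_preallocate(&mas, vma, GFP_KERNEL))
		return -ENOMEM;
	vma->vm_end = new_end;
	vma_mas_store(vma, &mas);	/* stores [vm_start, new_end - 1] */
	return 0;
}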
+static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
+{
mm->map_count++;
vma->vm_mm = mm;
/* add the VMA to the mapping */
if (vma->vm_file) {
- mapping = vma->vm_file->f_mapping;
+ struct address_space *mapping = vma->vm_file->f_mapping;
i_mmap_lock_write(mapping);
flush_dcache_mmap_lock(mapping);
@@ -572,67 +572,51 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
flush_dcache_mmap_unlock(mapping);
i_mmap_unlock_write(mapping);
}
+}
- /* add the VMA to the tree */
- parent = rb_prev = NULL;
- p = &mm->mm_rb.rb_node;
- while (*p) {
- parent = *p;
- pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
-
- /* sort by: start addr, end addr, VMA struct addr in that order
- * (the latter is necessary as we may get identical VMAs) */
- if (vma->vm_start < pvma->vm_start)
- p = &(*p)->rb_left;
- else if (vma->vm_start > pvma->vm_start) {
- rb_prev = parent;
- p = &(*p)->rb_right;
- } else if (vma->vm_end < pvma->vm_end)
- p = &(*p)->rb_left;
- else if (vma->vm_end > pvma->vm_end) {
- rb_prev = parent;
- p = &(*p)->rb_right;
- } else if (vma < pvma)
- p = &(*p)->rb_left;
- else if (vma > pvma) {
- rb_prev = parent;
- p = &(*p)->rb_right;
- } else
- BUG();
- }
-
- rb_link_node(&vma->vm_rb, parent, p);
- rb_insert_color(&vma->vm_rb, &mm->mm_rb);
+/*
+ * mas_add_vma_to_mm() - Maple state variant of add_vma_to_mm().
+ * @mas: The maple state with preallocations.
+ * @mm: The mm_struct
+ * @vma: The vma to add
+ *
+ */
+static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
+ struct vm_area_struct *vma)
+{
+ BUG_ON(!vma->vm_region);
- /* add VMA to the VMA list also */
- prev = NULL;
- if (rb_prev)
- prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
+ setup_vma_to_mm(vma, mm);
- __vma_link_list(mm, vma, prev);
+ /* add the VMA to the tree */
+ vma_mas_store(vma, mas);
}
/*
- * delete a VMA from its owning mm_struct and address space
+ * add a VMA into a process's mm_struct in the appropriate place in the list
+ * and tree and add to the address space's page tree also if not an anonymous
+ * page
+ * - should be called with mm->mmap_lock held writelocked
*/
-static void delete_vma_from_mm(struct vm_area_struct *vma)
-{
- int i;
- struct address_space *mapping;
- struct mm_struct *mm = vma->vm_mm;
- struct task_struct *curr = current;
-
- mm->map_count--;
- for (i = 0; i < VMACACHE_SIZE; i++) {
- /* if the vma is cached, invalidate the entire cache */
- if (curr->vmacache.vmas[i] == vma) {
- vmacache_invalidate(mm);
- break;
- }
+static int add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+ MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end);
+
+ if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
+ pr_warn("Allocation of vma tree for process %d failed\n",
+ current->pid);
+ return -ENOMEM;
}
+ mas_add_vma_to_mm(&mas, mm, vma);
+ return 0;
+}
+static void cleanup_vma_from_mm(struct vm_area_struct *vma)
+{
+ vma->vm_mm->map_count--;
/* remove the VMA from the mapping */
if (vma->vm_file) {
+ struct address_space *mapping;
mapping = vma->vm_file->f_mapping;
i_mmap_lock_write(mapping);
@@ -641,11 +625,24 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
flush_dcache_mmap_unlock(mapping);
i_mmap_unlock_write(mapping);
}
+}
+/*
+ * delete a VMA from its owning mm_struct and address space
+ */
+static int delete_vma_from_mm(struct vm_area_struct *vma)
+{
+ MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0);
- /* remove from the MM's tree and list */
- rb_erase(&vma->vm_rb, &mm->mm_rb);
+ if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
+ pr_warn("Allocation of vma tree for process %d failed\n",
+ current->pid);
+ return -ENOMEM;
+ }
+ cleanup_vma_from_mm(vma);
- __vma_unlink_list(mm, vma);
+ /* remove from the MM's tree and list */
+ vma_mas_remove(vma, &mas);
+ return 0;
}
/*
@@ -661,31 +658,26 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
vm_area_free(vma);
}
+struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
+ unsigned long start_addr,
+ unsigned long end_addr)
+{
+ unsigned long index = start_addr;
+
+ mmap_assert_locked(mm);
+ return mt_find(&mm->mm_mt, &index, end_addr - 1);
+}
+EXPORT_SYMBOL(find_vma_intersection);
+
/*
* look up the first VMA in which addr resides, NULL if none
* - should be called with mm->mmap_lock at least held readlocked
*/
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
- struct vm_area_struct *vma;
+ MA_STATE(mas, &mm->mm_mt, addr, addr);
- /* check the cache first */
- vma = vmacache_find(mm, addr);
- if (likely(vma))
- return vma;
-
- /* trawl the list (there may be multiple mappings in which addr
- * resides) */
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (vma->vm_start > addr)
- return NULL;
- if (vma->vm_end > addr) {
- vmacache_update(addr, vma);
- return vma;
- }
- }
-
- return NULL;
+ return mas_walk(&mas);
}
EXPORT_SYMBOL(find_vma);
@@ -717,26 +709,17 @@ static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
{
struct vm_area_struct *vma;
unsigned long end = addr + len;
+ MA_STATE(mas, &mm->mm_mt, addr, addr);
- /* check the cache first */
- vma = vmacache_find_exact(mm, addr, end);
- if (vma)
- return vma;
-
- /* trawl the list (there may be multiple mappings in which addr
- * resides) */
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (vma->vm_start < addr)
- continue;
- if (vma->vm_start > addr)
- return NULL;
- if (vma->vm_end == end) {
- vmacache_update(addr, vma);
- return vma;
- }
- }
+ vma = mas_walk(&mas);
+ if (!vma)
+ return NULL;
+ if (vma->vm_start != addr)
+ return NULL;
+ if (vma->vm_end != end)
+ return NULL;
- return NULL;
+ return vma;
}
/*
@@ -1069,6 +1052,7 @@ unsigned long do_mmap(struct file *file,
vm_flags_t vm_flags;
unsigned long capabilities, result;
int ret;
+ MA_STATE(mas, &current->mm->mm_mt, 0, 0);
*populate = 0;
@@ -1087,6 +1071,7 @@ unsigned long do_mmap(struct file *file,
* now know into VMA flags */
vm_flags = determine_vm_flags(file, prot, flags, capabilities);
+
/* we're going to need to record the mapping */
region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
if (!region)
@@ -1096,6 +1081,9 @@ unsigned long do_mmap(struct file *file,
if (!vma)
goto error_getting_vma;
+ if (mas_preallocate(&mas, vma, GFP_KERNEL))
+ goto error_maple_preallocate;
+
region->vm_usage = 1;
region->vm_flags = vm_flags;
region->vm_pgoff = pgoff;
@@ -1236,7 +1224,7 @@ unsigned long do_mmap(struct file *file,
current->mm->total_vm += len >> PAGE_SHIFT;
share:
- add_vma_to_mm(current->mm, vma);
+ mas_add_vma_to_mm(&mas, current->mm, vma);
/* we flush the region from the icache only when the first executable
* mapping of it is made */
@@ -1262,6 +1250,7 @@ error:
sharing_violation:
up_write(&nommu_region_sem);
+ mas_destroy(&mas);
pr_warn("Attempt to share mismatched mappings\n");
ret = -EINVAL;
goto error;
@@ -1278,6 +1267,14 @@ error_getting_region:
len, current->pid);
show_free_areas(0, NULL);
return -ENOMEM;
+
+error_maple_preallocate:
+ kmem_cache_free(vm_region_jar, region);
+ vm_area_free(vma);
+ pr_warn("Allocation of vma tree for process %d failed\n", current->pid);
+ show_free_areas(0, NULL);
+ return -ENOMEM;
+
}
unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
@@ -1343,6 +1340,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *new;
struct vm_region *region;
unsigned long npages;
+ MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end);
/* we're only permitted to split anonymous regions (these should have
* only a single usage on the region) */
@@ -1357,9 +1355,13 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
return -ENOMEM;
new = vm_area_dup(vma);
- if (!new) {
- kmem_cache_free(vm_region_jar, region);
- return -ENOMEM;
+ if (!new)
+ goto err_vma_dup;
+
+ if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
+ pr_warn("Allocation of vma tree for process %d failed\n",
+ current->pid);
+ goto err_mas_preallocate;
}
/* most fields are the same, copy all, and then fixup */
@@ -1378,7 +1380,6 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
- delete_vma_from_mm(vma);
down_write(&nommu_region_sem);
delete_nommu_region(vma->vm_region);
if (new_below) {
@@ -1391,9 +1392,19 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
add_nommu_region(vma->vm_region);
add_nommu_region(new->vm_region);
up_write(&nommu_region_sem);
- add_vma_to_mm(mm, vma);
- add_vma_to_mm(mm, new);
+
+ setup_vma_to_mm(vma, mm);
+ setup_vma_to_mm(new, mm);
+ mas_set_range(&mas, vma->vm_start, vma->vm_end - 1);
+ mas_store(&mas, vma);
+ vma_mas_store(new, &mas);
return 0;
+
+err_mas_preallocate:
+ vm_area_free(new);
+err_vma_dup:
+ kmem_cache_free(vm_region_jar, region);
+ return -ENOMEM;
}
/*
@@ -1408,12 +1419,14 @@ static int shrink_vma(struct mm_struct *mm,
/* adjust the VMA's pointers, which may reposition it in the MM's tree
* and list */
- delete_vma_from_mm(vma);
+ if (delete_vma_from_mm(vma))
+ return -ENOMEM;
if (from > vma->vm_start)
vma->vm_end = from;
else
vma->vm_start = to;
- add_vma_to_mm(mm, vma);
+ if (add_vma_to_mm(mm, vma))
+ return -ENOMEM;
/* cut the backing region down to size */
region = vma->vm_region;
@@ -1441,9 +1454,10 @@ static int shrink_vma(struct mm_struct *mm,
*/
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
{
+ MA_STATE(mas, &mm->mm_mt, start, start);
struct vm_area_struct *vma;
unsigned long end;
- int ret;
+ int ret = 0;
len = PAGE_ALIGN(len);
if (len == 0)
@@ -1452,7 +1466,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
end = start + len;
/* find the first potentially overlapping VMA */
- vma = find_vma(mm, start);
+ vma = mas_find(&mas, end - 1);
if (!vma) {
static int limit;
if (limit < 5) {
@@ -1471,7 +1485,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
return -EINVAL;
if (end == vma->vm_end)
goto erase_whole_vma;
- vma = vma->vm_next;
+ vma = mas_next(&mas, end - 1);
} while (vma);
return -EINVAL;
} else {
@@ -1493,9 +1507,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
}
erase_whole_vma:
- delete_vma_from_mm(vma);
+ if (delete_vma_from_mm(vma))
+ ret = -ENOMEM;
delete_vma(mm, vma);
- return 0;
+ return ret;
}
int vm_munmap(unsigned long addr, size_t len)
@@ -1520,6 +1535,7 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
*/
void exit_mmap(struct mm_struct *mm)
{
+ VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
if (!mm)
@@ -1527,12 +1543,18 @@ void exit_mmap(struct mm_struct *mm)
mm->total_vm = 0;
- while ((vma = mm->mmap)) {
- mm->mmap = vma->vm_next;
- delete_vma_from_mm(vma);
+ /*
+ * Lock the mm so the locking asserts do not complain, even though this
+ * is the only remaining user of the mm.
+ */
+ mmap_write_lock(mm);
+ for_each_vma(vmi, vma) {
+ cleanup_vma_from_mm(vma);
delete_vma(mm, vma);
cond_resched();
}
+ __mt_destroy(&mm->mm_mt);
+ mmap_write_unlock(mm);
}
int vm_brk(unsigned long addr, unsigned long len)
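The nommu.c hunks above drop the vmacache and the mm->mmap/vm_next list in favour of the maple tree, so a lookup becomes a single tree walk and iteration goes through the VMA iterator. A minimal sketch of the two idioms, using only helpers already visible in the hunks (MA_STATE, mas_walk, VMA_ITERATOR, for_each_vma) plus the usual <linux/mm.h> declarations; the sketch_* names are made up for illustration and this is not a standalone translation unit:

	/* Look up the VMA covering @addr with one maple-tree walk. */
	static struct vm_area_struct *sketch_vma_lookup(struct mm_struct *mm,
							unsigned long addr)
	{
		MA_STATE(mas, &mm->mm_mt, addr, addr);

		mmap_assert_locked(mm);
		return mas_walk(&mas);		/* NULL if no VMA covers addr */
	}

	/* Walk every VMA in @mm without touching vm_next. */
	static void sketch_vma_iterate(struct mm_struct *mm)
	{
		VMA_ITERATOR(vmi, mm, 0);
		struct vm_area_struct *vma;

		mmap_read_lock(mm);
		for_each_vma(vmi, vma)
			pr_info("vma %#lx-%#lx\n", vma->vm_start, vma->vm_end);
		mmap_read_unlock(mm);
	}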
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 3c6cf9e3cd66..1276e49b31b0 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -461,7 +461,7 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
if (is_memcg_oom(oc))
mem_cgroup_print_oom_meminfo(oc->memcg);
else {
- show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
+ __show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask, gfp_zone(oc->gfp_mask));
if (should_dump_unreclaim_slab())
dump_unreclaimable_slab();
}
@@ -509,10 +509,11 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);
-bool __oom_reap_task_mm(struct mm_struct *mm)
+static bool __oom_reap_task_mm(struct mm_struct *mm)
{
struct vm_area_struct *vma;
bool ret = true;
+ VMA_ITERATOR(vmi, mm, 0);
/*
* Tell all users of get_user/copy_from_user etc... that the content
@@ -522,7 +523,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
*/
set_bit(MMF_UNSTABLE, &mm->flags);
- for (vma = mm->mmap ; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
if (vma->vm_flags & (VM_HUGETLB|VM_PFNMAP))
continue;
@@ -764,10 +765,8 @@ static void mark_oom_victim(struct task_struct *tsk)
return;
/* oom_mm is bound to the signal struct life time. */
- if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
+ if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
mmgrab(tsk->signal->oom_mm);
- set_bit(MMF_OOM_VICTIM, &mm->flags);
- }
/*
* Make sure that the task is woken up from uninterruptible sleep
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 08522a831c7a..ac2c9f12a7b2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -27,6 +27,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
+#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
@@ -482,6 +483,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
static unsigned long prev_end_pfn, nr_initialised;
+ if (early_page_ext_enabled())
+ return false;
/*
* prev_end_pfn static that contains the end of previous zone
* No need to protect because called very early in boot before smp_init.
@@ -542,7 +545,7 @@ static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
#ifdef CONFIG_SPARSEMEM
pfn &= (PAGES_PER_SECTION-1);
#else
- pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
+ pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}
@@ -870,7 +873,8 @@ static inline bool set_page_guard(struct zone *zone, struct page *page,
INIT_LIST_HEAD(&page->buddy_list);
set_page_private(page, order);
/* Guard pages are not available for any usage */
- __mod_zone_freepage_state(zone, -(1 << order), migratetype);
+ if (!is_migrate_isolate(migratetype))
+ __mod_zone_freepage_state(zone, -(1 << order), migratetype);
return true;
}
@@ -900,7 +904,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
* order of appearance. So we need to first gather the full picture of what was
* enabled, and then make decisions.
*/
-void init_mem_debugging_and_hardening(void)
+void __init init_mem_debugging_and_hardening(void)
{
bool page_poisoning_requested = false;
@@ -935,6 +939,10 @@ void init_mem_debugging_and_hardening(void)
else
static_branch_disable(&init_on_free);
+ if (IS_ENABLED(CONFIG_KMSAN) &&
+ (_init_on_alloc_enabled_early || _init_on_free_enabled_early))
+ pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n");
+
#ifdef CONFIG_DEBUG_PAGEALLOC
if (!debug_pagealloc_enabled())
return;
@@ -1105,7 +1113,7 @@ static inline void __free_one_page(struct page *page,
int migratetype, fpi_t fpi_flags)
{
struct capture_control *capc = task_capc(zone);
- unsigned long buddy_pfn;
+ unsigned long buddy_pfn = 0;
unsigned long combined_pfn;
struct page *buddy;
bool to_tail;
@@ -1283,20 +1291,20 @@ static const char *page_bad_reason(struct page *page, unsigned long flags)
return bad_reason;
}
-static void check_free_page_bad(struct page *page)
+static void free_page_is_bad_report(struct page *page)
{
bad_page(page,
page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}
-static inline int check_free_page(struct page *page)
+static inline bool free_page_is_bad(struct page *page)
{
if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
- return 0;
+ return false;
/* Something has gone sideways, find it */
- check_free_page_bad(page);
- return 1;
+ free_page_is_bad_report(page);
+ return true;
}
static int free_tail_pages_check(struct page *head_page, struct page *page)
@@ -1398,6 +1406,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
VM_BUG_ON_PAGE(PageTail(page), page);
trace_mm_page_free(page, order);
+ kmsan_free_page(page, order);
if (unlikely(PageHWPoison(page)) && !order) {
/*
@@ -1428,7 +1437,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
for (i = 1; i < (1 << order); i++) {
if (compound)
bad += free_tail_pages_check(page, page + i);
- if (unlikely(check_free_page(page + i))) {
+ if (unlikely(free_page_is_bad(page + i))) {
bad++;
continue;
}
@@ -1439,8 +1448,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
page->mapping = NULL;
if (memcg_kmem_enabled() && PageMemcgKmem(page))
__memcg_kmem_uncharge_page(page, order);
- if (check_free)
- bad += check_free_page(page);
+ if (check_free && free_page_is_bad(page))
+ bad++;
if (bad)
return false;
@@ -1499,10 +1508,11 @@ static bool free_pcp_prepare(struct page *page, unsigned int order)
return free_pages_prepare(page, order, true, FPI_NONE);
}
+/* return true if this page has an inappropriate state */
static bool bulkfree_pcp_prepare(struct page *page)
{
if (debug_pagealloc_enabled_static())
- return check_free_page(page);
+ return free_page_is_bad(page);
else
return false;
}
@@ -1523,7 +1533,7 @@ static bool free_pcp_prepare(struct page *page, unsigned int order)
static bool bulkfree_pcp_prepare(struct page *page)
{
- return check_free_page(page);
+ return free_page_is_bad(page);
}
#endif /* CONFIG_DEBUG_VM */
@@ -1575,7 +1585,6 @@ static void free_pcppages_bulk(struct zone *zone, int count,
order = pindex_to_order(pindex);
nr_pages = 1 << order;
- BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
do {
int mt;
@@ -1804,6 +1813,10 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn,
{
if (early_page_uninitialised(pfn))
return;
+ if (!kmsan_memblock_free_pages(page, order)) {
+ /* KMSAN will take care of these pages. */
+ return;
+ }
__free_pages_core(page, order);
}
@@ -1855,7 +1868,7 @@ void set_zone_contiguous(struct zone *zone)
unsigned long block_start_pfn = zone->zone_start_pfn;
unsigned long block_end_pfn;
- block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
+ block_end_pfn = pageblock_end_pfn(block_start_pfn);
for (; block_start_pfn < zone_end_pfn(zone);
block_start_pfn = block_end_pfn,
block_end_pfn += pageblock_nr_pages) {
@@ -1890,15 +1903,14 @@ static void __init deferred_free_range(unsigned long pfn,
page = pfn_to_page(pfn);
/* Free a large naturally-aligned chunk if possible */
- if (nr_pages == pageblock_nr_pages &&
- (pfn & (pageblock_nr_pages - 1)) == 0) {
+ if (nr_pages == pageblock_nr_pages && pageblock_aligned(pfn)) {
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
__free_pages_core(page, pageblock_order);
return;
}
for (i = 0; i < nr_pages; i++, page++, pfn++) {
- if ((pfn & (pageblock_nr_pages - 1)) == 0)
+ if (pageblock_aligned(pfn))
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
__free_pages_core(page, 0);
}
@@ -1917,16 +1929,12 @@ static inline void __init pgdat_init_report_one_done(void)
/*
* Returns true if page needs to be initialized or freed to buddy allocator.
*
- * First we check if pfn is valid on architectures where it is possible to have
- * holes within pageblock_nr_pages. On systems where it is not possible, this
- * function is optimized out.
- *
- * Then, we check if a current large page is valid by only checking the validity
+ * We check if a current large page is valid by only checking the validity
* of the head pfn.
*/
static inline bool __init deferred_pfn_valid(unsigned long pfn)
{
- if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
+ if (pageblock_aligned(pfn) && !pfn_valid(pfn))
return false;
return true;
}
@@ -1938,14 +1946,13 @@ static inline bool __init deferred_pfn_valid(unsigned long pfn)
static void __init deferred_free_pages(unsigned long pfn,
unsigned long end_pfn)
{
- unsigned long nr_pgmask = pageblock_nr_pages - 1;
unsigned long nr_free = 0;
for (; pfn < end_pfn; pfn++) {
if (!deferred_pfn_valid(pfn)) {
deferred_free_range(pfn - nr_free, nr_free);
nr_free = 0;
- } else if (!(pfn & nr_pgmask)) {
+ } else if (pageblock_aligned(pfn)) {
deferred_free_range(pfn - nr_free, nr_free);
nr_free = 1;
} else {
@@ -1965,7 +1972,6 @@ static unsigned long __init deferred_init_pages(struct zone *zone,
unsigned long pfn,
unsigned long end_pfn)
{
- unsigned long nr_pgmask = pageblock_nr_pages - 1;
int nid = zone_to_nid(zone);
unsigned long nr_pages = 0;
int zid = zone_idx(zone);
@@ -1975,7 +1981,7 @@ static unsigned long __init deferred_init_pages(struct zone *zone,
if (!deferred_pfn_valid(pfn)) {
page = NULL;
continue;
- } else if (!page || !(pfn & nr_pgmask)) {
+ } else if (!page || pageblock_aligned(pfn)) {
page = pfn_to_page(pfn);
} else {
page++;
@@ -2651,8 +2657,8 @@ int move_freepages_block(struct zone *zone, struct page *page,
*num_movable = 0;
pfn = page_to_pfn(page);
- start_pfn = pfn & ~(pageblock_nr_pages - 1);
- end_pfn = start_pfn + pageblock_nr_pages - 1;
+ start_pfn = pageblock_start_pfn(pfn);
+ end_pfn = pageblock_end_pfn(pfn) - 1;
/* Do not cross zone boundaries */
if (!zone_spans_pfn(zone, start_pfn))
@@ -3010,7 +3016,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
* i.e. orders < pageblock_order. If there are no local zones free,
* the zonelists will be reiterated without ALLOC_NOFRAGMENT.
*/
- if (alloc_flags & ALLOC_NOFRAGMENT)
+ if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT)
min_order = pageblock_order;
/*
@@ -3598,16 +3604,11 @@ EXPORT_SYMBOL_GPL(split_page);
int __isolate_free_page(struct page *page, unsigned int order)
{
- unsigned long watermark;
- struct zone *zone;
- int mt;
-
- BUG_ON(!PageBuddy(page));
-
- zone = page_zone(page);
- mt = get_pageblock_migratetype(page);
+ struct zone *zone = page_zone(page);
+ int mt = get_pageblock_migratetype(page);
if (!is_migrate_isolate(mt)) {
+ unsigned long watermark;
/*
* Obey watermarks as if the page was being allocated. We can
* emulate a high-order watermark check with a raised order-0
@@ -3621,8 +3622,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
__mod_zone_freepage_state(zone, -(1UL << order), mt);
}
- /* Remove page from free list */
-
del_page_from_free_list(page, zone, order);
/*
@@ -3643,7 +3642,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
}
}
-
return 1UL << order;
}
@@ -3670,8 +3668,6 @@ void __putback_isolated_page(struct page *page, unsigned int order, int mt)
/*
* Update NUMA hit/miss statistics
- *
- * Must be called with interrupts disabled.
*/
static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
long nr_account)
@@ -3777,8 +3773,7 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
/* Lock and remove page from the per-cpu list */
static struct page *rmqueue_pcplist(struct zone *preferred_zone,
struct zone *zone, unsigned int order,
- gfp_t gfp_flags, int migratetype,
- unsigned int alloc_flags)
+ int migratetype, unsigned int alloc_flags)
{
struct per_cpu_pages *pcp;
struct list_head *list;
@@ -3815,8 +3810,17 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
}
/*
- * Allocate a page from the given zone. Use pcplists for order-0 allocations.
+ * Allocate a page from the given zone.
+ * Use pcplists for THP or "cheap" high-order allocations.
+ */
+
+/*
+ * Do not instrument rmqueue() with KMSAN. This function may call
+ * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
+ * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
+ * may call rmqueue() again, which will result in a deadlock.
*/
+__no_sanitize_memory
static inline
struct page *rmqueue(struct zone *preferred_zone,
struct zone *zone, unsigned int order,
@@ -3839,7 +3843,7 @@ struct page *rmqueue(struct zone *preferred_zone,
if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
migratetype != MIGRATE_MOVABLE) {
page = rmqueue_pcplist(preferred_zone, zone, order,
- gfp_flags, migratetype, alloc_flags);
+ migratetype, alloc_flags);
if (likely(page))
goto out;
}
@@ -4329,7 +4333,7 @@ static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
filter &= ~SHOW_MEM_FILTER_NODES;
- show_mem(filter, nodemask);
+ __show_mem(filter, nodemask, gfp_zone(gfp_mask));
}
void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
@@ -5147,7 +5151,8 @@ retry:
reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
if (reserve_flags)
- alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags);
+ alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
+ (alloc_flags & ALLOC_KSWAPD);
/*
* Reset the nodemask and zonelist iterators if memory policies can be
@@ -5272,7 +5277,7 @@ nopage:
* so that we can identify them and convert them to something
* else.
*/
- WARN_ON_ONCE_GFP(order > PAGE_ALLOC_COSTLY_ORDER, gfp_mask);
+ WARN_ON_ONCE_GFP(costly_order, gfp_mask);
/*
* Help non-failing allocations by giving them access to memory
@@ -5569,6 +5574,7 @@ out:
}
trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
+ kmsan_alloc_page(page, order, alloc_gfp);
return page;
}
@@ -6057,6 +6063,15 @@ static void show_migration_types(unsigned char type)
printk(KERN_CONT "(%s) ", tmp);
}
+static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
+{
+ int zone_idx;
+ for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
+ if (zone_managed_pages(pgdat->node_zones + zone_idx))
+ return true;
+ return false;
+}
+
/*
* Show free area list (used inside shift_scroll-lock stuff)
* We also calculate the percentage fragmentation. We do this by counting the
@@ -6066,7 +6081,7 @@ static void show_migration_types(unsigned char type)
* SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
* cpuset.
*/
-void show_free_areas(unsigned int filter, nodemask_t *nodemask)
+void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx)
{
unsigned long free_pcp = 0;
int cpu, nid;
@@ -6074,6 +6089,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
pg_data_t *pgdat;
for_each_populated_zone(zone) {
+ if (zone_idx(zone) > max_zone_idx)
+ continue;
if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
continue;
@@ -6113,6 +6130,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
for_each_online_pgdat(pgdat) {
if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
continue;
+ if (!node_has_managed_zones(pgdat, max_zone_idx))
+ continue;
printk("Node %d"
" active_anon:%lukB"
@@ -6171,6 +6190,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
for_each_populated_zone(zone) {
int i;
+ if (zone_idx(zone) > max_zone_idx)
+ continue;
if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
continue;
@@ -6232,6 +6253,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
unsigned long nr[MAX_ORDER], flags, total = 0;
unsigned char types[MAX_ORDER];
+ if (zone_idx(zone) > max_zone_idx)
+ continue;
if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
continue;
show_node(zone);
@@ -6557,7 +6580,7 @@ static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonesta
#define BOOT_PAGESET_BATCH 1
static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);
-DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
+static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
static void __build_all_zonelists(void *data)
{
@@ -6753,7 +6776,7 @@ void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone
* such that unmovable allocations won't be scattered all
* over the place during system boot.
*/
- if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
+ if (pageblock_aligned(pfn)) {
set_pageblock_migratetype(page, migratetype);
cond_resched();
}
@@ -6796,7 +6819,7 @@ static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
* Please note that MEMINIT_HOTPLUG path doesn't clear memmap
* because this is done early in section_activate()
*/
- if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
+ if (pageblock_aligned(pfn)) {
set_pageblock_migratetype(page, MIGRATE_MOVABLE);
cond_resched();
}
@@ -6859,7 +6882,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
unsigned long start = jiffies;
int nid = pgdat->node_id;
- if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
+ if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE))
return;
/*
@@ -6928,9 +6951,8 @@ static void __init init_unavailable_range(unsigned long spfn,
u64 pgcnt = 0;
for (pfn = spfn; pfn < epfn; pfn++) {
- if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
- pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
- + pageblock_nr_pages - 1;
+ if (!pfn_valid(pageblock_start_pfn(pfn))) {
+ pfn = pageblock_end_pfn(pfn) - 1;
continue;
}
__init_single_page(pfn_to_page(pfn), pfn, zone, node);
@@ -7035,7 +7057,7 @@ static int zone_batchsize(struct zone *zone)
* size is striking a balance between allocation latency
* and zone lock contention.
*/
- batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE);
+ batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE);
batch /= 4; /* We effectively *= 4 below */
if (batch < 1)
batch = 1;
@@ -7220,6 +7242,17 @@ void __meminit setup_zone_pageset(struct zone *zone)
}
/*
+ * The zone indicated has a new number of managed_pages; batch sizes and percpu
+ * page high values need to be recalculated.
+ */
+static void zone_pcp_update(struct zone *zone, int cpu_online)
+{
+ mutex_lock(&pcp_batch_high_lock);
+ zone_set_pageset_high_and_batch(zone, cpu_online);
+ mutex_unlock(&pcp_batch_high_lock);
+}
+
+/*
* Allocate per cpu pagesets and initialize them.
* Before this call only boot pagesets were available.
*/
@@ -7663,6 +7696,7 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
int i;
pgdat_resize_init(pgdat);
+ pgdat_kswapd_lock_init(pgdat);
pgdat_init_split_queue(pgdat);
pgdat_init_kcompactd(pgdat);
@@ -7957,17 +7991,6 @@ unsigned long __init node_map_pfn_alignment(void)
return ~accl_mask + 1;
}
-/**
- * find_min_pfn_with_active_regions - Find the minimum PFN registered
- *
- * Return: the minimum PFN based on information provided via
- * memblock_set_node().
- */
-unsigned long __init find_min_pfn_with_active_regions(void)
-{
- return PHYS_PFN(memblock_start_of_DRAM());
-}
-
/*
* early_calculate_totalpages()
* Sum pages in active regions for movable zone.
@@ -8260,7 +8283,7 @@ void __init free_area_init(unsigned long *max_zone_pfn)
memset(arch_zone_highest_possible_pfn, 0,
sizeof(arch_zone_highest_possible_pfn));
- start_pfn = find_min_pfn_with_active_regions();
+ start_pfn = PHYS_PFN(memblock_start_of_DRAM());
descending = arch_has_descending_max_zone_pfns();
for (i = 0; i < MAX_NR_ZONES; i++) {
@@ -8509,8 +8532,8 @@ void __init mem_init_print_info(void)
#endif
")\n",
K(nr_free_pages()), K(physpages),
- codesize >> 10, datasize >> 10, rosize >> 10,
- (init_data_size + init_code_size) >> 10, bss_size >> 10,
+ codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K,
+ (init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K,
K(physpages - totalram_pages() - totalcma_pages),
K(totalcma_pages)
#ifdef CONFIG_HIGHMEM
@@ -9023,7 +9046,7 @@ void *__init alloc_large_system_hash(const char *tablename,
{
unsigned long long max = high_limit;
unsigned long log2qty, size;
- void *table = NULL;
+ void *table;
gfp_t gfp_flags;
bool virt;
bool huge;
@@ -9035,8 +9058,8 @@ void *__init alloc_large_system_hash(const char *tablename,
numentries -= arch_reserved_kernel_pages();
/* It isn't necessary when PAGE_SIZE >= 1MB */
- if (PAGE_SHIFT < 20)
- numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
+ if (PAGE_SIZE < SZ_1M)
+ numentries = round_up(numentries, SZ_1M / PAGE_SIZE);
#if __BITS_PER_LONG > 32
if (!high_limit) {
@@ -9461,17 +9484,6 @@ void free_contig_range(unsigned long pfn, unsigned long nr_pages)
EXPORT_SYMBOL(free_contig_range);
/*
- * The zone indicated has a new number of managed_pages; batch sizes and percpu
- * page high values need to be recalculated.
- */
-void zone_pcp_update(struct zone *zone, int cpu_online)
-{
- mutex_lock(&pcp_batch_high_lock);
- zone_set_pageset_high_and_batch(zone, cpu_online);
- mutex_unlock(&pcp_batch_high_lock);
-}
-
-/*
* Effectively disable pcplists for the zone by setting the high limit to 0
* and draining all cpus. A concurrent page freeing on another CPU that's about
* to put the page on pcplist will either finish before the drain and the page
@@ -9503,9 +9515,11 @@ void zone_pcp_reset(struct zone *zone)
drain_zonestat(zone, pzstats);
}
free_percpu(zone->per_cpu_pageset);
- free_percpu(zone->per_cpu_zonestats);
zone->per_cpu_pageset = &boot_pageset;
- zone->per_cpu_zonestats = &boot_zonestats;
+ if (zone->per_cpu_zonestats != &boot_zonestats) {
+ free_percpu(zone->per_cpu_zonestats);
+ zone->per_cpu_zonestats = &boot_zonestats;
+ }
}
}
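Several page_alloc.c hunks above replace open-coded pageblock arithmetic with pageblock_aligned(), pageblock_start_pfn() and pageblock_end_pfn(). The removed expressions imply the following equivalences; this is a reference sketch only (the real helpers live in the pageblock header), assuming they are thin wrappers over the existing alignment macros, with sketch_* names invented for the example:

	static inline bool sketch_pageblock_aligned(unsigned long pfn)
	{
		return IS_ALIGNED(pfn, pageblock_nr_pages);	/* was !(pfn & (pageblock_nr_pages - 1)) */
	}

	static inline unsigned long sketch_pageblock_start_pfn(unsigned long pfn)
	{
		return ALIGN_DOWN(pfn, pageblock_nr_pages);	/* was pfn & ~(pageblock_nr_pages - 1) */
	}

	static inline unsigned long sketch_pageblock_end_pfn(unsigned long pfn)
	{
		return ALIGN(pfn + 1, pageblock_nr_pages);	/* one past the block holding pfn */
	}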
diff --git a/mm/page_counter.c b/mm/page_counter.c
index eb156ff5d603..db20d6452b71 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -17,24 +17,23 @@ static void propagate_protected_usage(struct page_counter *c,
unsigned long usage)
{
unsigned long protected, old_protected;
- unsigned long low, min;
long delta;
if (!c->parent)
return;
- min = READ_ONCE(c->min);
- if (min || atomic_long_read(&c->min_usage)) {
- protected = min(usage, min);
+ protected = min(usage, READ_ONCE(c->min));
+ old_protected = atomic_long_read(&c->min_usage);
+ if (protected != old_protected) {
old_protected = atomic_long_xchg(&c->min_usage, protected);
delta = protected - old_protected;
if (delta)
atomic_long_add(delta, &c->parent->children_min_usage);
}
- low = READ_ONCE(c->low);
- if (low || atomic_long_read(&c->low_usage)) {
- protected = min(usage, low);
+ protected = min(usage, READ_ONCE(c->low));
+ old_protected = atomic_long_read(&c->low_usage);
+ if (protected != old_protected) {
old_protected = atomic_long_xchg(&c->low_usage, protected);
delta = protected - old_protected;
if (delta)
@@ -193,7 +192,7 @@ int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
old = xchg(&counter->max, nr_pages);
- if (page_counter_read(counter) <= usage)
+ if (page_counter_read(counter) <= usage || nr_pages >= old)
return 0;
counter->max = old;
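The propagate_protected_usage() rewrite above clamps usage to the local min/low protection and only performs the atomic exchange (and the delta propagation to the parent) when the clamped value actually changed. A small user-space model of that delta propagation, for illustration only; the function name, the standalone main() and the plain longs standing in for the atomics are made up for the example:

	#include <stdio.h>

	static long children_min_usage;	/* stands in for the parent's aggregate */

	static void propagate_min(long *min_usage, long usage, long min)
	{
		long protected = usage < min ? usage : min;
		long old = *min_usage;

		if (protected != old) {		/* skip the exchange when nothing changed */
			*min_usage = protected;
			children_min_usage += protected - old;
		}
	}

	int main(void)
	{
		long min_usage = 0;

		propagate_min(&min_usage, 100, 30);	/* usage above min: +30 */
		propagate_min(&min_usage, 100, 30);	/* unchanged: no propagation */
		propagate_min(&min_usage, 10, 30);	/* usage dropped below min: -20 */
		printf("children_min_usage = %ld\n", children_min_usage);	/* prints 10 */
		return 0;
	}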
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 3dc715d7ac29..affe80243b6d 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -9,6 +9,7 @@
#include <linux/page_owner.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
+#include <linux/rcupdate.h>
/*
* struct page extension
@@ -59,6 +60,10 @@
* can utilize this callback to initialize the state of it correctly.
*/
+#ifdef CONFIG_SPARSEMEM
+#define PAGE_EXT_INVALID (0x1)
+#endif
+
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
@@ -84,6 +89,15 @@ static struct page_ext_operations *page_ext_ops[] __initdata = {
unsigned long page_ext_size = sizeof(struct page_ext);
static unsigned long total_usage;
+static struct page_ext *lookup_page_ext(const struct page *page);
+
+bool early_page_ext;
+static int __init setup_early_page_ext(char *str)
+{
+ early_page_ext = true;
+ return 0;
+}
+early_param("early_page_ext", setup_early_page_ext);
static bool __init invoke_need_callbacks(void)
{
@@ -125,6 +139,48 @@ static inline struct page_ext *get_entry(void *base, unsigned long index)
return base + page_ext_size * index;
}
+/**
+ * page_ext_get() - Get the extended information for a page.
+ * @page: The page we're interested in.
+ *
+ * Ensures that the page_ext will remain valid until page_ext_put()
+ * is called.
+ *
+ * Return: NULL if no page_ext exists for this page.
+ * Context: Any context. Caller may not sleep until they have called
+ * page_ext_put().
+ */
+struct page_ext *page_ext_get(struct page *page)
+{
+ struct page_ext *page_ext;
+
+ rcu_read_lock();
+ page_ext = lookup_page_ext(page);
+ if (!page_ext) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ return page_ext;
+}
+
+/**
+ * page_ext_put() - Finish working with a page's extended information.
+ * @page_ext: Page extended information received from page_ext_get().
+ *
+ * The page extended information of the page may not be valid after this
+ * function is called.
+ *
+ * Return: None.
+ * Context: Any context in which the corresponding page_ext_get() was called.
+ */
+void page_ext_put(struct page_ext *page_ext)
+{
+ if (unlikely(!page_ext))
+ return;
+
+ rcu_read_unlock();
+}
#ifndef CONFIG_SPARSEMEM
@@ -133,12 +189,13 @@ void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)
pgdat->node_page_ext = NULL;
}
-struct page_ext *lookup_page_ext(const struct page *page)
+static struct page_ext *lookup_page_ext(const struct page *page)
{
unsigned long pfn = page_to_pfn(page);
unsigned long index;
struct page_ext *base;
+ WARN_ON_ONCE(!rcu_read_lock_held());
base = NODE_DATA(page_to_nid(page))->node_page_ext;
/*
* The sanity checks the page allocator does upon freeing a
@@ -206,20 +263,27 @@ fail:
}
#else /* CONFIG_SPARSEMEM */
+static bool page_ext_invalid(struct page_ext *page_ext)
+{
+ return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
+}
-struct page_ext *lookup_page_ext(const struct page *page)
+static struct page_ext *lookup_page_ext(const struct page *page)
{
unsigned long pfn = page_to_pfn(page);
struct mem_section *section = __pfn_to_section(pfn);
+ struct page_ext *page_ext = READ_ONCE(section->page_ext);
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
/*
* The sanity checks the page allocator does upon freeing a
* page can reach here before the page_ext arrays are
* allocated when feeding a range of pages to the allocator
* for the first time during bootup or memory hotplug.
*/
- if (!section->page_ext)
+ if (page_ext_invalid(page_ext))
return NULL;
- return get_entry(section->page_ext, pfn);
+ return get_entry(page_ext, pfn);
}
static void *__meminit alloc_page_ext(size_t size, int nid)
@@ -298,9 +362,30 @@ static void __free_page_ext(unsigned long pfn)
ms = __pfn_to_section(pfn);
if (!ms || !ms->page_ext)
return;
- base = get_entry(ms->page_ext, pfn);
+
+ base = READ_ONCE(ms->page_ext);
+ /*
+ * page_ext here can be valid while doing the roll back
+ * operation in online_page_ext().
+ */
+ if (page_ext_invalid(base))
+ base = (void *)base - PAGE_EXT_INVALID;
+ WRITE_ONCE(ms->page_ext, NULL);
+
+ base = get_entry(base, pfn);
free_page_ext(base);
- ms->page_ext = NULL;
+}
+
+static void __invalidate_page_ext(unsigned long pfn)
+{
+ struct mem_section *ms;
+ void *val;
+
+ ms = __pfn_to_section(pfn);
+ if (!ms || !ms->page_ext)
+ return;
+ val = (void *)ms->page_ext + PAGE_EXT_INVALID;
+ WRITE_ONCE(ms->page_ext, val);
}
static int __meminit online_page_ext(unsigned long start_pfn,
@@ -336,13 +421,27 @@ static int __meminit online_page_ext(unsigned long start_pfn,
}
static int __meminit offline_page_ext(unsigned long start_pfn,
- unsigned long nr_pages, int nid)
+ unsigned long nr_pages)
{
unsigned long start, end, pfn;
start = SECTION_ALIGN_DOWN(start_pfn);
end = SECTION_ALIGN_UP(start_pfn + nr_pages);
+ /*
+ * Freeing of page_ext is done in 3 steps to avoid
+ * use-after-free of it:
+ * 1) Traverse all the sections and mark their page_ext
+ * as invalid.
+ * 2) Wait for all the existing users of page_ext who
+ * started before invalidation to finish.
+ * 3) Free the page_ext.
+ */
+ for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
+ __invalidate_page_ext(pfn);
+
+ synchronize_rcu();
+
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
__free_page_ext(pfn);
return 0;
@@ -362,11 +461,11 @@ static int __meminit page_ext_callback(struct notifier_block *self,
break;
case MEM_OFFLINE:
offline_page_ext(mn->start_pfn,
- mn->nr_pages, mn->status_change_nid);
+ mn->nr_pages);
break;
case MEM_CANCEL_ONLINE:
offline_page_ext(mn->start_pfn,
- mn->nr_pages, mn->status_change_nid);
+ mn->nr_pages);
break;
case MEM_GOING_OFFLINE:
break;
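The new page_ext_get()/page_ext_put() pair above wraps lookup_page_ext() in an RCU read-side critical section, so the three-step offline path (invalidate, synchronize_rcu(), free) cannot free a page_ext array while a reader still holds a pointer into it. A minimal sketch of the caller-side pattern that the page_owner.c and page_table_check.c conversions below follow; the function name is invented and the body is illustrative only:

	static void sketch_touch_page_ext(struct page *page)
	{
		struct page_ext *page_ext = page_ext_get(page);	/* takes rcu_read_lock() on success */

		if (unlikely(!page_ext))
			return;		/* no page_ext for this page (early boot, hotplug window) */

		/* ... read or update the per-page extension data; must not sleep here ... */

		page_ext_put(page_ext);		/* drops rcu_read_lock() */
	}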
diff --git a/mm/page_io.c b/mm/page_io.c
index 68318134dc92..2af34dd8fa4d 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -28,7 +28,7 @@
#include <linux/delayacct.h>
#include "swap.h"
-void end_swap_bio_write(struct bio *bio)
+static void end_swap_bio_write(struct bio *bio)
{
struct page *page = bio_first_page_all(bio);
@@ -180,29 +180,30 @@ bad_bmap:
*/
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
+ struct folio *folio = page_folio(page);
int ret = 0;
- if (try_to_free_swap(page)) {
- unlock_page(page);
+ if (folio_free_swap(folio)) {
+ folio_unlock(folio);
goto out;
}
/*
* Arch code may have to preserve more data than just the page
* contents, e.g. memory tags.
*/
- ret = arch_prepare_to_swap(page);
+ ret = arch_prepare_to_swap(&folio->page);
if (ret) {
- set_page_dirty(page);
- unlock_page(page);
+ folio_mark_dirty(folio);
+ folio_unlock(folio);
goto out;
}
- if (frontswap_store(page) == 0) {
- set_page_writeback(page);
- unlock_page(page);
- end_page_writeback(page);
+ if (frontswap_store(&folio->page) == 0) {
+ folio_start_writeback(folio);
+ folio_unlock(folio);
+ folio_end_writeback(folio);
goto out;
}
- ret = __swap_writepage(page, wbc, end_swap_bio_write);
+ ret = __swap_writepage(&folio->page, wbc);
out:
return ret;
}
@@ -332,8 +333,7 @@ static int swap_writepage_fs(struct page *page, struct writeback_control *wbc)
return 0;
}
-int __swap_writepage(struct page *page, struct writeback_control *wbc,
- bio_end_io_t end_write_func)
+int __swap_writepage(struct page *page, struct writeback_control *wbc)
{
struct bio *bio;
int ret;
@@ -358,7 +358,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
GFP_NOIO);
bio->bi_iter.bi_sector = swap_page_sector(page);
- bio->bi_end_io = end_write_func;
+ bio->bi_end_io = end_swap_bio_write;
bio_add_page(bio, page, thp_size(page), 0);
bio_associate_blkg_from_page(bio, page);
@@ -453,18 +453,21 @@ int swap_readpage(struct page *page, bool synchronous,
struct swap_info_struct *sis = page_swap_info(page);
bool workingset = PageWorkingset(page);
unsigned long pflags;
+ bool in_thrashing;
VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageUptodate(page), page);
/*
- * Count submission time as memory stall. When the device is congested,
- * or the submitting cgroup IO-throttled, submission can be a
- * significant part of overall IO time.
+ * Count submission time as memory stall and delay. When the device
+ * is congested, or the submitting cgroup IO-throttled, submission
+ * can be a significant part of overall IO time.
*/
- if (workingset)
+ if (workingset) {
+ delayacct_thrashing_start(&in_thrashing);
psi_memstall_enter(&pflags);
+ }
delayacct_swapin_start();
if (frontswap_load(page) == 0) {
@@ -513,8 +516,10 @@ int swap_readpage(struct page *page, bool synchronous,
bio_put(bio);
out:
- if (workingset)
+ if (workingset) {
+ delayacct_thrashing_end(&in_thrashing);
psi_memstall_leave(&pflags);
+ }
delayacct_swapin_end();
return ret;
}
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index eb3a68ca92ad..04141a9bea70 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -37,8 +37,8 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
struct zone *zone = page_zone(page);
unsigned long pfn;
- VM_BUG_ON(ALIGN_DOWN(start_pfn, pageblock_nr_pages) !=
- ALIGN_DOWN(end_pfn - 1, pageblock_nr_pages));
+ VM_BUG_ON(pageblock_start_pfn(start_pfn) !=
+ pageblock_start_pfn(end_pfn - 1));
if (is_migrate_cma_page(page)) {
/*
@@ -172,7 +172,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
* to avoid redundant checks.
*/
check_unmovable_start = max(page_to_pfn(page), start_pfn);
- check_unmovable_end = min(ALIGN(page_to_pfn(page) + 1, pageblock_nr_pages),
+ check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)),
end_pfn);
unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
@@ -312,7 +312,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
struct zone *zone;
int ret;
- VM_BUG_ON(!IS_ALIGNED(boundary_pfn, pageblock_nr_pages));
+ VM_BUG_ON(!pageblock_aligned(boundary_pfn));
if (isolate_before)
isolate_pageblock = boundary_pfn - pageblock_nr_pages;
@@ -532,8 +532,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
unsigned long pfn;
struct page *page;
/* isolation is done at page block granularity */
- unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
- unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
+ unsigned long isolate_start = pageblock_start_pfn(start_pfn);
+ unsigned long isolate_end = pageblock_align(end_pfn);
int ret;
bool skip_isolation = false;
@@ -579,9 +579,8 @@ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
{
unsigned long pfn;
struct page *page;
- unsigned long isolate_start = ALIGN_DOWN(start_pfn, pageblock_nr_pages);
- unsigned long isolate_end = ALIGN(end_pfn, pageblock_nr_pages);
-
+ unsigned long isolate_start = pageblock_start_pfn(start_pfn);
+ unsigned long isolate_end = pageblock_align(end_pfn);
for (pfn = isolate_start;
pfn < isolate_end;
diff --git a/mm/page_owner.c b/mm/page_owner.c
index e4c6f3f1695b..2d27f532df4c 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -141,7 +141,7 @@ void __reset_page_owner(struct page *page, unsigned short order)
struct page_owner *page_owner;
u64 free_ts_nsec = local_clock();
- page_ext = lookup_page_ext(page);
+ page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
@@ -153,6 +153,7 @@ void __reset_page_owner(struct page *page, unsigned short order)
page_owner->free_ts_nsec = free_ts_nsec;
page_ext = page_ext_next(page_ext);
}
+ page_ext_put(page_ext);
}
static inline void __set_page_owner_handle(struct page_ext *page_ext,
@@ -183,19 +184,21 @@ static inline void __set_page_owner_handle(struct page_ext *page_ext,
noinline void __set_page_owner(struct page *page, unsigned short order,
gfp_t gfp_mask)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext;
depot_stack_handle_t handle;
+ handle = save_stack(gfp_mask);
+
+ page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
-
- handle = save_stack(gfp_mask);
__set_page_owner_handle(page_ext, handle, order, gfp_mask);
+ page_ext_put(page_ext);
}
void __set_page_owner_migrate_reason(struct page *page, int reason)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(page);
struct page_owner *page_owner;
if (unlikely(!page_ext))
@@ -203,12 +206,13 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
page_owner = get_page_owner(page_ext);
page_owner->last_migrate_reason = reason;
+ page_ext_put(page_ext);
}
void __split_page_owner(struct page *page, unsigned int nr)
{
int i;
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get(page);
struct page_owner *page_owner;
if (unlikely(!page_ext))
@@ -219,17 +223,25 @@ void __split_page_owner(struct page *page, unsigned int nr)
page_owner->order = 0;
page_ext = page_ext_next(page_ext);
}
+ page_ext_put(page_ext);
}
void __folio_copy_owner(struct folio *newfolio, struct folio *old)
{
- struct page_ext *old_ext = lookup_page_ext(&old->page);
- struct page_ext *new_ext = lookup_page_ext(&newfolio->page);
+ struct page_ext *old_ext;
+ struct page_ext *new_ext;
struct page_owner *old_page_owner, *new_page_owner;
- if (unlikely(!old_ext || !new_ext))
+ old_ext = page_ext_get(&old->page);
+ if (unlikely(!old_ext))
return;
+ new_ext = page_ext_get(&newfolio->page);
+ if (unlikely(!new_ext)) {
+ page_ext_put(old_ext);
+ return;
+ }
+
old_page_owner = get_page_owner(old_ext);
new_page_owner = get_page_owner(new_ext);
new_page_owner->order = old_page_owner->order;
@@ -254,6 +266,8 @@ void __folio_copy_owner(struct folio *newfolio, struct folio *old)
*/
__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
+ page_ext_put(new_ext);
+ page_ext_put(old_ext);
}
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@@ -283,7 +297,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
continue;
}
- block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+ block_end_pfn = pageblock_end_pfn(pfn);
block_end_pfn = min(block_end_pfn, end_pfn);
pageblock_mt = get_pageblock_migratetype(page);
@@ -307,12 +321,12 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
if (PageReserved(page))
continue;
- page_ext = lookup_page_ext(page);
+ page_ext = page_ext_get(page);
if (unlikely(!page_ext))
continue;
if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
- continue;
+ goto ext_put_continue;
page_owner = get_page_owner(page_ext);
page_mt = gfp_migratetype(page_owner->gfp_mask);
@@ -323,9 +337,12 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
count[pageblock_mt]++;
pfn = block_end_pfn;
+ page_ext_put(page_ext);
break;
}
pfn += (1UL << page_owner->order) - 1;
+ext_put_continue:
+ page_ext_put(page_ext);
}
}
@@ -435,7 +452,7 @@ err:
void __dump_page_owner(const struct page *page)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext = page_ext_get((void *)page);
struct page_owner *page_owner;
depot_stack_handle_t handle;
gfp_t gfp_mask;
@@ -452,6 +469,7 @@ void __dump_page_owner(const struct page *page)
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
pr_alert("page_owner info is not present (never set?)\n");
+ page_ext_put(page_ext);
return;
}
@@ -482,6 +500,7 @@ void __dump_page_owner(const struct page *page)
if (page_owner->last_migrate_reason != -1)
pr_alert("page has been migrated, last migrate reason: %s\n",
migrate_reason_names[page_owner->last_migrate_reason]);
+ page_ext_put(page_ext);
}
static ssize_t
@@ -497,17 +516,25 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
return -EINVAL;
page = NULL;
- pfn = min_low_pfn + *ppos;
-
+ if (*ppos == 0)
+ pfn = min_low_pfn;
+ else
+ pfn = *ppos;
/* Find a valid PFN or the start of a MAX_ORDER_NR_PAGES area */
while (!pfn_valid(pfn) && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0)
pfn++;
- drain_all_pages(NULL);
-
/* Find an allocated page */
for (; pfn < max_pfn; pfn++) {
/*
+ * This temporary copy of the page_owner data is required so
+ * that we do not have to hold the RCU read lock (taken by
+ * page_ext_get()) across copy_to_user() or GFP_KERNEL
+ * allocations, both of which may sleep.
+ */
+ struct page_owner page_owner_tmp;
+
+ /*
* If the new page is in a new MAX_ORDER_NR_PAGES area,
* validate the area as existing, skip it if not
*/
@@ -525,7 +552,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
continue;
}
- page_ext = lookup_page_ext(page);
+ page_ext = page_ext_get(page);
if (unlikely(!page_ext))
continue;
@@ -534,14 +561,14 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
* because we don't hold the zone lock.
*/
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
- continue;
+ goto ext_put_continue;
/*
* Although we do have the info about past allocation of free
* pages, it's not relevant for current memory usage.
*/
if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
- continue;
+ goto ext_put_continue;
page_owner = get_page_owner(page_ext);
@@ -550,7 +577,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
* would inflate the stats.
*/
if (!IS_ALIGNED(pfn, 1 << page_owner->order))
- continue;
+ goto ext_put_continue;
/*
* Access to page_ext->handle isn't synchronous so we should
@@ -558,18 +585,37 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
*/
handle = READ_ONCE(page_owner->handle);
if (!handle)
- continue;
+ goto ext_put_continue;
/* Record the next PFN to read in the file offset */
- *ppos = (pfn - min_low_pfn) + 1;
+ *ppos = pfn + 1;
+ page_owner_tmp = *page_owner;
+ page_ext_put(page_ext);
return print_page_owner(buf, count, pfn, page,
- page_owner, handle);
+ &page_owner_tmp, handle);
+ext_put_continue:
+ page_ext_put(page_ext);
}
return 0;
}
+static loff_t lseek_page_owner(struct file *file, loff_t offset, int orig)
+{
+ switch (orig) {
+ case SEEK_SET:
+ file->f_pos = offset;
+ break;
+ case SEEK_CUR:
+ file->f_pos += offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return file->f_pos;
+}
+
static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
{
unsigned long pfn = zone->zone_start_pfn;
@@ -589,7 +635,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
continue;
}
- block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+ block_end_pfn = pageblock_end_pfn(pfn);
block_end_pfn = min(block_end_pfn, end_pfn);
for (; pfn < block_end_pfn; pfn++) {
@@ -617,18 +663,20 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
if (PageReserved(page))
continue;
- page_ext = lookup_page_ext(page);
+ page_ext = page_ext_get(page);
if (unlikely(!page_ext))
continue;
/* Maybe overlapping zone */
if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
- continue;
+ goto ext_put_continue;
/* Found early allocated page */
__set_page_owner_handle(page_ext, early_handle,
0, 0);
count++;
+ext_put_continue:
+ page_ext_put(page_ext);
}
cond_resched();
}
@@ -660,6 +708,7 @@ static void init_early_allocated_pages(void)
static const struct file_operations proc_page_owner_operations = {
.read = read_page_owner,
+ .llseek = lseek_page_owner,
};
static int __init pageowner_init(void)
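With the read_page_owner() and lseek_page_owner() changes above, the file offset of the page_owner debugfs file is interpreted directly as a PFN, so a reader can seek to an arbitrary frame before reading. A small user-space example of that usage; it assumes a kernel booted with page_owner=on and debugfs mounted at /sys/kernel/debug:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		unsigned long pfn = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
		char buf[4096];
		ssize_t n;
		int fd;

		fd = open("/sys/kernel/debug/page_owner", O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* The file position is now a raw PFN: jump straight to it. */
		if (lseek(fd, (off_t)pfn, SEEK_SET) < 0) {
			perror("lseek");
			return 1;
		}
		/* Each read() returns one page_owner record at or after that PFN. */
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			fputs(buf, stdout);
		}
		close(fd);
		return 0;
	}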
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index e2062748791a..433dbce13fe1 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -53,7 +53,7 @@ static struct page_table_check *get_page_table_check(struct page_ext *page_ext)
}
/*
- * An enty is removed from the page table, decrement the counters for that page
+ * An entry is removed from the page table, decrement the counters for that page
* verify that it is of correct type and counters do not become negative.
*/
static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
@@ -68,7 +68,7 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
return;
page = pfn_to_page(pfn);
- page_ext = lookup_page_ext(page);
+ page_ext = page_ext_get(page);
anon = PageAnon(page);
for (i = 0; i < pgcnt; i++) {
@@ -83,10 +83,11 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
}
page_ext = page_ext_next(page_ext);
}
+ page_ext_put(page_ext);
}
/*
- * A new enty is added to the page table, increment the counters for that page
+ * A new entry is added to the page table, increment the counters for that page
* verify that it is of correct type and is not being mapped with a different
* type to a different process.
*/
@@ -103,7 +104,7 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
return;
page = pfn_to_page(pfn);
- page_ext = lookup_page_ext(page);
+ page_ext = page_ext_get(page);
anon = PageAnon(page);
for (i = 0; i < pgcnt; i++) {
@@ -118,6 +119,7 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
}
page_ext = page_ext_next(page_ext);
}
+ page_ext_put(page_ext);
}
/*
@@ -126,9 +128,10 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
*/
void __page_table_check_zero(struct page *page, unsigned int order)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ struct page_ext *page_ext;
unsigned long i;
+ page_ext = page_ext_get(page);
BUG_ON(!page_ext);
for (i = 0; i < (1ul << order); i++) {
struct page_table_check *ptc = get_page_table_check(page_ext);
@@ -137,6 +140,7 @@ void __page_table_check_zero(struct page *page, unsigned int order)
BUG_ON(atomic_read(&ptc->file_map_count));
page_ext = page_ext_next(page_ext);
}
+ page_ext_put(page_ext);
}
void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 8e9e574d535a..93e13fc17d3c 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -86,7 +86,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
!is_device_exclusive_entry(entry))
return false;
- pfn = swp_offset(entry);
+ pfn = swp_offset_pfn(entry);
} else if (is_swap_pte(*pvmw->pte)) {
swp_entry_t entry;
@@ -96,7 +96,7 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
!is_device_exclusive_entry(entry))
return false;
- pfn = swp_offset(entry);
+ pfn = swp_offset_pfn(entry);
} else {
if (!pte_present(*pvmw->pte))
return false;
@@ -221,7 +221,7 @@ restart:
return not_found(pvmw);
entry = pmd_to_swp_entry(pmde);
if (!is_migration_entry(entry) ||
- !check_pmd(swp_offset(entry), pvmw))
+ !check_pmd(swp_offset_pfn(entry), pvmw))
return not_found(pvmw);
return true;
}
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index fa7a3d21a751..2ff3a5bebceb 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -460,7 +460,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
} else { /* inside vma */
walk.vma = vma;
next = min(end, vma->vm_end);
- vma = vma->vm_next;
+ vma = find_vma(mm, vma->vm_end);
err = walk_page_test(start, next, &walk);
if (err > 0) {
@@ -482,7 +482,15 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
return err;
}
-/*
+/**
+ * walk_page_range_novma - walk a range of pagetables not backed by a vma
+ * @mm: mm_struct representing the target process of page table walk
+ * @start: start address of the virtual address range
+ * @end: end address of the virtual address range
+ * @ops: operation to call during the walk
+ * @pgd: pgd to walk if different from mm->pgd
+ * @private: private data for callbacks' usage
+ *
* Similar to walk_page_range() but can walk any page tables even if they are
* not backed by VMAs. Because 'unusual' entries may be walked this function
* will also not lock the PTEs for the pte_entry() callback. This is useful for
diff --git a/mm/rmap.c b/mm/rmap.c
index 93d5a6f793d2..2ec925e5fa6a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -23,10 +23,9 @@
* inode->i_rwsem (while writing or truncating, not reading or faulting)
* mm->mmap_lock
* mapping->invalidate_lock (in filemap_fault)
- * page->flags PG_locked (lock_page) * (see hugetlbfs below)
- * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
+ * page->flags PG_locked (lock_page)
+ * hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share, see hugetlbfs below)
* mapping->i_mmap_rwsem
- * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
* anon_vma->rwsem
* mm->page_table_lock or pte_lock
* swap_lock (in swap_duplicate, swap_info_get)
@@ -46,10 +45,11 @@
* ->tasklist_lock
* pte map lock
*
- * * hugetlbfs PageHuge() pages take locks in this order:
- * mapping->i_mmap_rwsem
- * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
- * page->flags PG_locked (lock_page)
+ * hugetlbfs PageHuge() pages take locks in this order:
+ * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
+ * vma_lock (hugetlb specific lock for pmd_sharing)
+ * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
+ * page->flags PG_locked (lock_page)
*/
#include <linux/mm.h>
@@ -489,16 +489,16 @@ void __init anon_vma_init(void)
* if there is a mapcount, we can dereference the anon_vma after observing
* those.
*/
-struct anon_vma *page_get_anon_vma(struct page *page)
+struct anon_vma *folio_get_anon_vma(struct folio *folio)
{
struct anon_vma *anon_vma = NULL;
unsigned long anon_mapping;
rcu_read_lock();
- anon_mapping = (unsigned long)READ_ONCE(page->mapping);
+ anon_mapping = (unsigned long)READ_ONCE(folio->mapping);
if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
goto out;
- if (!page_mapped(page))
+ if (!folio_mapped(folio))
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
@@ -508,13 +508,13 @@ struct anon_vma *page_get_anon_vma(struct page *page)
}
/*
- * If this page is still mapped, then its anon_vma cannot have been
+ * If this folio is still mapped, then its anon_vma cannot have been
* freed. But if it has been unmapped, we have no security against the
* anon_vma structure being freed and reused (for another anon_vma:
* SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
* above cannot corrupt).
*/
- if (!page_mapped(page)) {
+ if (!folio_mapped(folio)) {
rcu_read_unlock();
put_anon_vma(anon_vma);
return NULL;
@@ -526,11 +526,11 @@ out:
}
/*
- * Similar to page_get_anon_vma() except it locks the anon_vma.
+ * Similar to folio_get_anon_vma() except it locks the anon_vma.
*
* Its a little more complex as it tries to keep the fast path to a single
* atomic op -- the trylock. If we fail the trylock, we fall back to getting a
- * reference like with page_get_anon_vma() and then block on the mutex
+ * reference like with folio_get_anon_vma() and then block on the mutex
* on !rwc->try_lock case.
*/
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
@@ -602,11 +602,6 @@ out:
return anon_vma;
}
-void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
-{
- anon_vma_unlock_read(anon_vma);
-}
-
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
* Flush TLB entries for recently unmapped pages from remote CPUs. It is
@@ -770,13 +765,17 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
return vma_address(page, vma);
}
+/*
+ * Returns the actual pmd_t* where we expect 'address' to be mapped from, or
+ * NULL if it doesn't exist. No guarantees / checks on what the pmd_t*
+ * represents.
+ */
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd = NULL;
- pmd_t pmde;
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
@@ -791,15 +790,6 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
goto out;
pmd = pmd_offset(pud, address);
- /*
- * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
- * without holding anon_vma lock for write. So when looking for a
- * genuine pmde (in which to find pte), test present and !THP together.
- */
- pmde = *pmd;
- barrier();
- if (!pmd_present(pmde) || pmd_trans_huge(pmde))
- pmd = NULL;
out:
return pmd;
}
@@ -833,6 +823,12 @@ static bool folio_referenced_one(struct folio *folio,
}
if (pvmw.pte) {
+ if (lru_gen_enabled() && pte_young(*pvmw.pte) &&
+ !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))) {
+ lru_gen_look_around(&pvmw);
+ referenced++;
+ }
+
if (ptep_clear_flush_young_notify(vma, address,
pvmw.pte)) {
/*
@@ -1101,22 +1097,20 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
*/
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
- struct anon_vma *anon_vma = vma->anon_vma;
- struct page *subpage = page;
-
- page = compound_head(page);
+ void *anon_vma = vma->anon_vma;
+ struct folio *folio = page_folio(page);
- VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
VM_BUG_ON_VMA(!anon_vma, vma);
- anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+ anon_vma += PAGE_MAPPING_ANON;
/*
* Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
* simultaneously, so a concurrent reader (eg folio_referenced()'s
* folio_test_anon()) will not see one without the other.
*/
- WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
- SetPageAnonExclusive(subpage);
+ WRITE_ONCE(folio->mapping, anon_vma);
+ SetPageAnonExclusive(page);
}
/**
@@ -1560,33 +1554,45 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
* To call huge_pmd_unshare, i_mmap_rwsem must be
* held in write mode. Caller needs to explicitly
* do this outside rmap routines.
+ *
+ * We also must hold hugetlb vma_lock in write mode.
+ * Lock order dictates acquiring vma_lock BEFORE
+ * i_mmap_rwsem. We can only try lock here and fail
+ * if unsuccessful.
*/
- VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
- if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
- flush_tlb_range(vma, range.start, range.end);
- mmu_notifier_invalidate_range(mm, range.start,
- range.end);
-
- /*
- * The ref count of the PMD page was dropped
- * which is part of the way map counting
- * is done for shared PMDs. Return 'true'
- * here. When there is no other sharing,
- * huge_pmd_unshare returns false and we will
- * unmap the actual page and drop map count
- * to zero.
- */
- page_vma_mapped_walk_done(&pvmw);
- break;
+ if (!anon) {
+ VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
+ if (!hugetlb_vma_trylock_write(vma)) {
+ page_vma_mapped_walk_done(&pvmw);
+ ret = false;
+ break;
+ }
+ if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
+ hugetlb_vma_unlock_write(vma);
+ flush_tlb_range(vma,
+ range.start, range.end);
+ mmu_notifier_invalidate_range(mm,
+ range.start, range.end);
+ /*
+ * The ref count of the PMD page was
+ * dropped which is part of the way map
+ * counting is done for shared PMDs.
+ * Return 'true' here. When there is
+ * no other sharing, huge_pmd_unshare
+ * returns false and we will unmap the
+ * actual page and drop map count
+ * to zero.
+ */
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
+ hugetlb_vma_unlock_write(vma);
}
pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
} else {
flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
- /*
- * Nuke the page table entry. When having to clear
- * PageAnonExclusive(), we always have to flush.
- */
- if (should_defer_flush(mm, flags) && !anon_exclusive) {
+ /* Nuke the page table entry. */
+ if (should_defer_flush(mm, flags)) {
/*
* We clear the PTE but do not flush so potentially
* a remote CPU could still be writing to the folio.
@@ -1717,6 +1723,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
page_vma_mapped_walk_done(&pvmw);
break;
}
+
+ /* See page_try_share_anon_rmap(): clear PTE first. */
if (anon_exclusive &&
page_try_share_anon_rmap(subpage)) {
swap_free(entry);
@@ -1936,26 +1944,41 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
* To call huge_pmd_unshare, i_mmap_rwsem must be
* held in write mode. Caller needs to explicitly
* do this outside rmap routines.
+ *
+ * We also must hold hugetlb vma_lock in write mode.
+ * Lock order dictates acquiring vma_lock BEFORE
+ * i_mmap_rwsem. We can only try lock here and
+ * fail if unsuccessful.
*/
- VM_BUG_ON(!anon && !(flags & TTU_RMAP_LOCKED));
- if (!anon && huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
- flush_tlb_range(vma, range.start, range.end);
- mmu_notifier_invalidate_range(mm, range.start,
- range.end);
-
- /*
- * The ref count of the PMD page was dropped
- * which is part of the way map counting
- * is done for shared PMDs. Return 'true'
- * here. When there is no other sharing,
- * huge_pmd_unshare returns false and we will
- * unmap the actual page and drop map count
- * to zero.
- */
- page_vma_mapped_walk_done(&pvmw);
- break;
+ if (!anon) {
+ VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
+ if (!hugetlb_vma_trylock_write(vma)) {
+ page_vma_mapped_walk_done(&pvmw);
+ ret = false;
+ break;
+ }
+ if (huge_pmd_unshare(mm, vma, address, pvmw.pte)) {
+ hugetlb_vma_unlock_write(vma);
+ flush_tlb_range(vma,
+ range.start, range.end);
+ mmu_notifier_invalidate_range(mm,
+ range.start, range.end);
+
+ /*
+ * The ref count of the PMD page was
+ * dropped which is part of the way map
+ * counting is done for shared PMDs.
+ * Return 'true' here. When there is
+ * no other sharing, huge_pmd_unshare
+ * returns false and we will unmap the
+ * actual page and drop map count
+ * to zero.
+ */
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
+ hugetlb_vma_unlock_write(vma);
}
-
/* Nuke the hugetlb page table entry */
pteval = huge_ptep_clear_flush(vma, address, pvmw.pte);
} else {
@@ -2048,6 +2071,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
}
VM_BUG_ON_PAGE(pte_write(pteval) && folio_test_anon(folio) &&
!anon_exclusive, subpage);
+
+ /* See page_try_share_anon_rmap(): clear PTE first. */
if (anon_exclusive &&
page_try_share_anon_rmap(subpage)) {
if (folio_test_hugetlb(folio))
@@ -2073,7 +2098,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
else
entry = make_readable_migration_entry(
page_to_pfn(subpage));
-
+ if (pte_young(pteval))
+ entry = make_migration_entry_young(entry);
+ if (pte_dirty(pteval))
+ entry = make_migration_entry_dirty(entry);
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
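
Both try_to_unmap_one() and try_to_migrate_one() above gain the same pattern: the hugetlb vma_lock can only be trylocked at this point, because lock order puts vma_lock before i_mmap_rwsem and the caller already holds i_mmap_rwsem; if the trylock fails, the walk is abandoned with ret = false. A minimal userspace analogue of that "trylock the earlier lock when you arrive holding the later one" rule, using POSIX rwlocks with purely illustrative names (a sketch of the idea, not kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative lock order: vma_lock must be taken before i_mmap_lock. */
static pthread_rwlock_t vma_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t i_mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/*
 * Called with i_mmap_lock already held.  Blocking on vma_lock here would
 * invert the documented order, so only a trylock is safe; on failure the
 * caller gives up and retries the whole operation later.
 */
static bool unshare_step_locked(void)
{
	if (pthread_rwlock_trywrlock(&vma_lock) != 0)
		return false;

	/* work that needs both locks would go here */

	pthread_rwlock_unlock(&vma_lock);
	return true;
}

int main(void)
{
	pthread_rwlock_rdlock(&i_mmap_lock);
	printf("step %s\n", unshare_step_locked() ? "done" : "deferred");
	pthread_rwlock_unlock(&i_mmap_lock);
	return 0;
}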
diff --git a/mm/rodata_test.c b/mm/rodata_test.c
index 2613371945b7..6d783436951f 100644
--- a/mm/rodata_test.c
+++ b/mm/rodata_test.c
@@ -9,13 +9,13 @@
#include <linux/rodata_test.h>
#include <linux/uaccess.h>
+#include <linux/mm.h>
#include <asm/sections.h>
static const int rodata_test_data = 0xC3;
void rodata_test(void)
{
- unsigned long start, end;
int zero = 0;
/* test 1: read the value */
@@ -39,13 +39,11 @@ void rodata_test(void)
}
/* test 4: check if the rodata section is PAGE_SIZE aligned */
- start = (unsigned long)__start_rodata;
- end = (unsigned long)__end_rodata;
- if (start & (PAGE_SIZE - 1)) {
+ if (!PAGE_ALIGNED(__start_rodata)) {
pr_err("start of .rodata is not page size aligned\n");
return;
}
- if (end & (PAGE_SIZE - 1)) {
+ if (!PAGE_ALIGNED(__end_rodata)) {
pr_err("end of .rodata is not page size aligned\n");
return;
}
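
The rodata_test.c hunk drops the open-coded mask arithmetic in favour of PAGE_ALIGNED(). The underlying test is a plain power-of-two alignment check; a standalone userspace sketch of the same check, with the page size hard-coded for the example rather than taken from the kernel:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096UL	/* assumed page size, illustration only */

/* Same test as the removed "addr & (PAGE_SIZE - 1)" expression. */
static bool example_page_aligned(uintptr_t addr)
{
	return (addr & (EXAMPLE_PAGE_SIZE - 1)) == 0;
}

int main(void)
{
	printf("0x1000 aligned: %d\n", example_page_aligned(0x1000));
	printf("0x1004 aligned: %d\n", example_page_aligned(0x1004));
	return 0;
}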
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 3f7154099795..04c3ac9448a1 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -276,12 +276,10 @@ static struct file_system_type secretmem_fs = {
.kill_sb = kill_anon_super,
};
-static int secretmem_init(void)
+static int __init secretmem_init(void)
{
- int ret = 0;
-
if (!secretmem_enable)
- return ret;
+ return 0;
secretmem_mnt = kern_mount(&secretmem_fs);
if (IS_ERR(secretmem_mnt))
@@ -290,6 +288,6 @@ static int secretmem_init(void)
/* prevent secretmem mappings from ever getting PROT_EXEC */
secretmem_mnt->mnt_flags |= MNT_NOEXEC;
- return ret;
+ return 0;
}
fs_initcall(secretmem_init);
diff --git a/mm/shmem.c b/mm/shmem.c
index 42e5888bf84d..cabe48d55a64 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -38,6 +38,7 @@
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
+#include <linux/iversion.h>
#include "swap.h"
static struct vfsmount *shm_mnt;
@@ -139,17 +140,6 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct folio **foliop, enum sgp_type sgp,
gfp_t gfp, struct vm_area_struct *vma,
vm_fault_t *fault_type);
-static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp,
- gfp_t gfp, struct vm_area_struct *vma,
- struct vm_fault *vmf, vm_fault_t *fault_type);
-
-int shmem_getpage(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp)
-{
- return shmem_getpage_gfp(inode, index, pagep, sgp,
- mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
-}
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
@@ -190,7 +180,7 @@ static inline int shmem_reacct_size(unsigned long flags,
/*
* ... whereas tmpfs objects are accounted incrementally as
* pages are allocated, in order to allow large sparse files.
- * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
+ * shmem_get_folio reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
* so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
*/
static inline int shmem_acct_block(unsigned long flags, long pages)
@@ -472,20 +462,22 @@ static bool shmem_confirm_swap(struct address_space *mapping,
static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
-bool shmem_is_huge(struct vm_area_struct *vma,
- struct inode *inode, pgoff_t index)
+bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
+ pgoff_t index, bool shmem_huge_force)
{
loff_t i_size;
if (!S_ISREG(inode->i_mode))
return false;
- if (shmem_huge == SHMEM_HUGE_DENY)
- return false;
if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
return false;
+ if (shmem_huge_force)
+ return true;
if (shmem_huge == SHMEM_HUGE_FORCE)
return true;
+ if (shmem_huge == SHMEM_HUGE_DENY)
+ return false;
switch (SHMEM_SB(inode->i_sb)->huge) {
case SHMEM_HUGE_ALWAYS:
@@ -629,7 +621,7 @@ next:
goto move_back;
}
- ret = split_huge_page(&folio->page);
+ ret = split_folio(folio);
folio_unlock(folio);
folio_put(folio);
@@ -680,8 +672,8 @@ static long shmem_unused_huge_count(struct super_block *sb,
#define shmem_huge SHMEM_HUGE_DENY
-bool shmem_is_huge(struct vm_area_struct *vma,
- struct inode *inode, pgoff_t index)
+bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
+ pgoff_t index, bool shmem_huge_force)
{
return false;
}
@@ -763,23 +755,22 @@ error:
}
/*
- * Like delete_from_page_cache, but substitutes swap for page.
+ * Like delete_from_page_cache, but substitutes swap for @folio.
*/
-static void shmem_delete_from_page_cache(struct page *page, void *radswap)
+static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = folio->mapping;
+ long nr = folio_nr_pages(folio);
int error;
- VM_BUG_ON_PAGE(PageCompound(page), page);
-
xa_lock_irq(&mapping->i_pages);
- error = shmem_replace_entry(mapping, page->index, page, radswap);
- page->mapping = NULL;
- mapping->nrpages--;
- __dec_lruvec_page_state(page, NR_FILE_PAGES);
- __dec_lruvec_page_state(page, NR_SHMEM);
+ error = shmem_replace_entry(mapping, folio->index, folio, radswap);
+ folio->mapping = NULL;
+ mapping->nrpages -= nr;
+ __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+ __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
xa_unlock_irq(&mapping->i_pages);
- put_page(page);
+ folio_put(folio);
BUG_ON(error);
}
@@ -886,10 +877,9 @@ void shmem_unlock_mapping(struct address_space *mapping)
static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
{
struct folio *folio;
- struct page *page;
/*
- * At first avoid shmem_getpage(,,,SGP_READ): that fails
+ * At first avoid shmem_get_folio(,,,SGP_READ): that fails
* beyond i_size, and reports fallocated pages as holes.
*/
folio = __filemap_get_folio(inode->i_mapping, index,
@@ -900,9 +890,9 @@ static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index)
* But read a page back from swap if any of it is within i_size
* (although in some cases this is just a waste of time).
*/
- page = NULL;
- shmem_getpage(inode, index, &page, SGP_READ);
- return page ? page_folio(page) : NULL;
+ folio = NULL;
+ shmem_get_folio(inode, index, &folio, SGP_READ);
+ return folio;
}
/*
@@ -1043,6 +1033,7 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
shmem_undo_range(inode, lstart, lend, false);
inode->i_ctime = inode->i_mtime = current_time(inode);
+ inode_inc_iversion(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
@@ -1069,7 +1060,7 @@ static int shmem_getattr(struct user_namespace *mnt_userns,
STATX_ATTR_NODUMP);
generic_fillattr(&init_user_ns, inode, stat);
- if (shmem_is_huge(NULL, inode, 0))
+ if (shmem_is_huge(NULL, inode, 0, false))
stat->blksize = HPAGE_PMD_SIZE;
if (request_mask & STATX_BTIME) {
@@ -1087,6 +1078,8 @@ static int shmem_setattr(struct user_namespace *mnt_userns,
struct inode *inode = d_inode(dentry);
struct shmem_inode_info *info = SHMEM_I(inode);
int error;
+ bool update_mtime = false;
+ bool update_ctime = true;
error = setattr_prepare(&init_user_ns, dentry, attr);
if (error)
@@ -1107,7 +1100,9 @@ static int shmem_setattr(struct user_namespace *mnt_userns,
if (error)
return error;
i_size_write(inode, newsize);
- inode->i_ctime = inode->i_mtime = current_time(inode);
+ update_mtime = true;
+ } else {
+ update_ctime = false;
}
if (newsize <= oldsize) {
loff_t holebegin = round_up(newsize, PAGE_SIZE);
@@ -1127,6 +1122,12 @@ static int shmem_setattr(struct user_namespace *mnt_userns,
setattr_copy(&init_user_ns, inode, attr);
if (attr->ia_valid & ATTR_MODE)
error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode);
+ if (!error && update_ctime) {
+ inode->i_ctime = current_time(inode);
+ if (update_mtime)
+ inode->i_mtime = inode->i_ctime;
+ inode_inc_iversion(inode);
+ }
return error;
}
@@ -1328,17 +1329,18 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
* "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages,
* and its shmem_writeback() needs them to be split when swapping.
*/
- if (PageTransCompound(page)) {
+ if (folio_test_large(folio)) {
/* Ensure the subpages are still dirty */
- SetPageDirty(page);
+ folio_test_set_dirty(folio);
if (split_huge_page(page) < 0)
goto redirty;
- ClearPageDirty(page);
+ folio = page_folio(page);
+ folio_clear_dirty(folio);
}
- BUG_ON(!PageLocked(page));
- mapping = page->mapping;
- index = page->index;
+ BUG_ON(!folio_test_locked(folio));
+ mapping = folio->mapping;
+ index = folio->index;
inode = mapping->host;
info = SHMEM_I(inode);
if (info->flags & VM_LOCKED)
@@ -1361,15 +1363,15 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
/*
* This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
* value into swapfile.c, the only way we can correctly account for a
- * fallocated page arriving here is now to initialize it and write it.
+ * fallocated folio arriving here is now to initialize it and write it.
*
- * That's okay for a page already fallocated earlier, but if we have
+ * That's okay for a folio already fallocated earlier, but if we have
* not yet completed the fallocation, then (a) we want to keep track
- * of this page in case we have to undo it, and (b) it may not be a
+ * of this folio in case we have to undo it, and (b) it may not be a
* good idea to continue anyway, once we're pushing into swap. So
- * reactivate the page, and let shmem_fallocate() quit when too many.
+ * reactivate the folio, and let shmem_fallocate() quit when too many.
*/
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
if (inode->i_private) {
struct shmem_falloc *shmem_falloc;
spin_lock(&inode->i_lock);
@@ -1385,9 +1387,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
if (shmem_falloc)
goto redirty;
}
- clear_highpage(page);
- flush_dcache_page(page);
- SetPageUptodate(page);
+ folio_zero_range(folio, 0, folio_size(folio));
+ flush_dcache_folio(folio);
+ folio_mark_uptodate(folio);
}
swap = folio_alloc_swap(folio);
@@ -1396,7 +1398,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
/*
* Add inode to shmem_unuse()'s list of swapped-out inodes,
- * if it's not already there. Do it now before the page is
+ * if it's not already there. Do it now before the folio is
* moved to swap cache, when its pagelock no longer protects
* the inode from eviction. But don't unlock the mutex until
* we've incremented swapped, because shmem_unuse_inode() will
@@ -1406,7 +1408,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
if (list_empty(&info->swaplist))
list_add(&info->swaplist, &shmem_swaplist);
- if (add_to_swap_cache(page, swap,
+ if (add_to_swap_cache(folio, swap,
__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
NULL) == 0) {
spin_lock_irq(&info->lock);
@@ -1415,21 +1417,21 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
spin_unlock_irq(&info->lock);
swap_shmem_alloc(swap);
- shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
+ shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap));
mutex_unlock(&shmem_swaplist_mutex);
- BUG_ON(page_mapped(page));
- swap_writepage(page, wbc);
+ BUG_ON(folio_mapped(folio));
+ swap_writepage(&folio->page, wbc);
return 0;
}
mutex_unlock(&shmem_swaplist_mutex);
- put_swap_page(page, swap);
+ put_swap_folio(folio, swap);
redirty:
- set_page_dirty(page);
+ folio_mark_dirty(folio);
if (wbc->for_reclaim)
- return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
- unlock_page(page);
+ return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */
+ folio_unlock(folio);
return 0;
}
@@ -1486,7 +1488,7 @@ static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
mpol_cond_put(vma->vm_policy);
}
-static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
+static struct folio *shmem_swapin(swp_entry_t swap, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
struct vm_area_struct pvma;
@@ -1499,7 +1501,9 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
page = swap_cluster_readahead(swap, gfp, &vmf);
shmem_pseudo_vma_destroy(&pvma);
- return page;
+ if (!page)
+ return NULL;
+ return page_folio(page);
}
/*
@@ -1560,12 +1564,6 @@ static struct folio *shmem_alloc_folio(gfp_t gfp,
return folio;
}
-static struct page *shmem_alloc_page(gfp_t gfp,
- struct shmem_inode_info *info, pgoff_t index)
-{
- return &shmem_alloc_folio(gfp, info, index)->page;
-}
-
static struct folio *shmem_alloc_and_acct_folio(gfp_t gfp, struct inode *inode,
pgoff_t index, bool huge)
{
@@ -1599,7 +1597,7 @@ failed:
/*
* When a page is moved from swapcache to shmem filecache (either by the
- * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
+ * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of
* shmem_unuse_inode()), it may have been read in earlier from swap, in
* ignorance of the mapping it belongs to. If that mapping has special
* constraints (like the gma500 GEM driver, which requires RAM below 4GB),
@@ -1614,54 +1612,52 @@ static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp)
return folio_zonenum(folio) > gfp_zone(gfp);
}
-static int shmem_replace_page(struct page **pagep, gfp_t gfp,
+static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
- struct page *oldpage, *newpage;
struct folio *old, *new;
struct address_space *swap_mapping;
swp_entry_t entry;
pgoff_t swap_index;
int error;
- oldpage = *pagep;
- entry.val = page_private(oldpage);
+ old = *foliop;
+ entry = folio_swap_entry(old);
swap_index = swp_offset(entry);
- swap_mapping = page_mapping(oldpage);
+ swap_mapping = swap_address_space(entry);
/*
* We have arrived here because our zones are constrained, so don't
* limit chance of success by further cpuset and node constraints.
*/
gfp &= ~GFP_CONSTRAINT_MASK;
- newpage = shmem_alloc_page(gfp, info, index);
- if (!newpage)
+ VM_BUG_ON_FOLIO(folio_test_large(old), old);
+ new = shmem_alloc_folio(gfp, info, index);
+ if (!new)
return -ENOMEM;
- get_page(newpage);
- copy_highpage(newpage, oldpage);
- flush_dcache_page(newpage);
+ folio_get(new);
+ folio_copy(new, old);
+ flush_dcache_folio(new);
- __SetPageLocked(newpage);
- __SetPageSwapBacked(newpage);
- SetPageUptodate(newpage);
- set_page_private(newpage, entry.val);
- SetPageSwapCache(newpage);
+ __folio_set_locked(new);
+ __folio_set_swapbacked(new);
+ folio_mark_uptodate(new);
+ folio_set_swap_entry(new, entry);
+ folio_set_swapcache(new);
/*
* Our caller will very soon move newpage out of swapcache, but it's
* a nice clean interface for us to replace oldpage by newpage there.
*/
xa_lock_irq(&swap_mapping->i_pages);
- error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage);
+ error = shmem_replace_entry(swap_mapping, swap_index, old, new);
if (!error) {
- old = page_folio(oldpage);
- new = page_folio(newpage);
mem_cgroup_migrate(old, new);
- __inc_lruvec_page_state(newpage, NR_FILE_PAGES);
- __inc_lruvec_page_state(newpage, NR_SHMEM);
- __dec_lruvec_page_state(oldpage, NR_FILE_PAGES);
- __dec_lruvec_page_state(oldpage, NR_SHMEM);
+ __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
+ __lruvec_stat_mod_folio(new, NR_SHMEM, 1);
+ __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
+ __lruvec_stat_mod_folio(old, NR_SHMEM, -1);
}
xa_unlock_irq(&swap_mapping->i_pages);
@@ -1671,18 +1667,17 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
* both PageSwapCache and page_private after getting page lock;
* but be defensive. Reverse old to newpage for clear and free.
*/
- oldpage = newpage;
+ old = new;
} else {
- lru_cache_add(newpage);
- *pagep = newpage;
+ folio_add_lru(new);
+ *foliop = new;
}
- ClearPageSwapCache(oldpage);
- set_page_private(oldpage, 0);
+ folio_clear_swapcache(old);
+ old->private = NULL;
- unlock_page(oldpage);
- put_page(oldpage);
- put_page(oldpage);
+ folio_unlock(old);
+ folio_put_refs(old, 2);
return error;
}
@@ -1730,7 +1725,6 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL;
- struct page *page;
struct folio *folio = NULL;
swp_entry_t swap;
int error;
@@ -1743,8 +1737,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
return -EIO;
/* Look it up and read it in.. */
- page = lookup_swap_cache(swap, NULL, 0);
- if (!page) {
+ folio = swap_cache_get_folio(swap, NULL, 0);
+ if (!folio) {
/* Or update major stats only when swapin succeeds?? */
if (fault_type) {
*fault_type |= VM_FAULT_MAJOR;
@@ -1752,13 +1746,12 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
count_memcg_event_mm(charge_mm, PGMAJFAULT);
}
/* Here we actually start the io */
- page = shmem_swapin(swap, gfp, info, index);
- if (!page) {
+ folio = shmem_swapin(swap, gfp, info, index);
+ if (!folio) {
error = -ENOMEM;
goto failed;
}
}
- folio = page_folio(page);
/* We have to do this with folio locked to prevent races */
folio_lock(folio);
@@ -1781,8 +1774,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
arch_swap_restore(swap, folio);
if (shmem_should_replace_folio(folio, gfp)) {
- error = shmem_replace_page(&page, gfp, info, index);
- folio = page_folio(page);
+ error = shmem_replace_folio(&folio, gfp, info, index);
if (error)
goto failed;
}
@@ -1822,7 +1814,7 @@ unlock:
}
/*
- * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
+ * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
*
* If we allocate a new one we do not mark it dirty. That's up to the
* vm. If we swap it in we mark it dirty since we also free the swap
@@ -1831,10 +1823,10 @@ unlock:
* vma, vmf, and fault_type are only supplied by shmem_fault:
* otherwise they are NULL.
*/
-static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
- struct page **pagep, enum sgp_type sgp, gfp_t gfp,
- struct vm_area_struct *vma, struct vm_fault *vmf,
- vm_fault_t *fault_type)
+static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
+ struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
+ struct vm_area_struct *vma, struct vm_fault *vmf,
+ vm_fault_t *fault_type)
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
@@ -1874,7 +1866,7 @@ repeat:
if (error == -EEXIST)
goto repeat;
- *pagep = &folio->page;
+ *foliop = folio;
return error;
}
@@ -1884,7 +1876,7 @@ repeat:
folio_mark_accessed(folio);
if (folio_test_uptodate(folio))
goto out;
- /* fallocated page */
+ /* fallocated folio */
if (sgp != SGP_READ)
goto clear;
folio_unlock(folio);
@@ -1892,10 +1884,10 @@ repeat:
}
/*
- * SGP_READ: succeed on hole, with NULL page, letting caller zero.
- * SGP_NOALLOC: fail on hole, with NULL page, letting caller fail.
+ * SGP_READ: succeed on hole, with NULL folio, letting caller zero.
+ * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail.
*/
- *pagep = NULL;
+ *foliop = NULL;
if (sgp == SGP_READ)
return 0;
if (sgp == SGP_NOALLOC)
@@ -1910,7 +1902,7 @@ repeat:
return 0;
}
- if (!shmem_is_huge(vma, inode, index))
+ if (!shmem_is_huge(vma, inode, index, false))
goto alloc_nohuge;
huge_gfp = vma_thp_gfp_mask(vma);
@@ -1928,7 +1920,7 @@ alloc_nohuge:
if (error != -ENOSPC)
goto unlock;
/*
- * Try to reclaim some space by splitting a huge page
+ * Try to reclaim some space by splitting a large folio
* beyond i_size on the filesystem.
*/
while (retry--) {
@@ -1964,9 +1956,9 @@ alloc_nohuge:
if (folio_test_pmd_mappable(folio) &&
DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
- hindex + HPAGE_PMD_NR - 1) {
+ folio_next_index(folio) - 1) {
/*
- * Part of the huge page is beyond i_size: subject
+ * Part of the large folio is beyond i_size: subject
* to shrink under memory pressure.
*/
spin_lock(&sbinfo->shrinklist_lock);
@@ -1983,14 +1975,14 @@ alloc_nohuge:
}
/*
- * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
+ * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio.
*/
if (sgp == SGP_FALLOC)
sgp = SGP_WRITE;
clear:
/*
- * Let SGP_WRITE caller clear ends if write does not fill page;
- * but SGP_FALLOC on a page fallocated earlier must initialize
+ * Let SGP_WRITE caller clear ends if write does not fill folio;
+ * but SGP_FALLOC on a folio fallocated earlier must initialize
* it now, lest undo on failure cancel our earlier guarantee.
*/
if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) {
@@ -2016,7 +2008,7 @@ clear:
goto unlock;
}
out:
- *pagep = folio_page(folio, index - hindex);
+ *foliop = folio;
return 0;
/*
@@ -2046,6 +2038,13 @@ unlock:
return error;
}
+int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop,
+ enum sgp_type sgp)
+{
+ return shmem_get_folio_gfp(inode, index, foliop, sgp,
+ mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
+}
+
/*
* This is like autoremove_wake_function, but it removes the wait queue
* entry unconditionally - even if something else had already woken the
@@ -2063,6 +2062,7 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
+ struct folio *folio = NULL;
int err;
vm_fault_t ret = VM_FAULT_LOCKED;
@@ -2125,10 +2125,12 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
spin_unlock(&inode->i_lock);
}
- err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, SGP_CACHE,
+ err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE,
gfp, vma, vmf, &ret);
if (err)
return vmf_error(err);
+ if (folio)
+ vmf->page = folio_file_page(folio, vmf->pgoff);
return ret;
}
@@ -2398,7 +2400,6 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
void *page_kaddr;
struct folio *folio;
- struct page *page;
int ret;
pgoff_t max_off;
@@ -2417,53 +2418,53 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
if (!*pagep) {
ret = -ENOMEM;
- page = shmem_alloc_page(gfp, info, pgoff);
- if (!page)
+ folio = shmem_alloc_folio(gfp, info, pgoff);
+ if (!folio)
goto out_unacct_blocks;
if (!zeropage) { /* COPY */
- page_kaddr = kmap_atomic(page);
+ page_kaddr = kmap_local_folio(folio, 0);
ret = copy_from_user(page_kaddr,
(const void __user *)src_addr,
PAGE_SIZE);
- kunmap_atomic(page_kaddr);
+ kunmap_local(page_kaddr);
/* fallback to copy_from_user outside mmap_lock */
if (unlikely(ret)) {
- *pagep = page;
+ *pagep = &folio->page;
ret = -ENOENT;
/* don't free the page */
goto out_unacct_blocks;
}
- flush_dcache_page(page);
+ flush_dcache_folio(folio);
} else { /* ZEROPAGE */
- clear_user_highpage(page, dst_addr);
+ clear_user_highpage(&folio->page, dst_addr);
}
} else {
- page = *pagep;
+ folio = page_folio(*pagep);
+ VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
*pagep = NULL;
}
- VM_BUG_ON(PageLocked(page));
- VM_BUG_ON(PageSwapBacked(page));
- __SetPageLocked(page);
- __SetPageSwapBacked(page);
- __SetPageUptodate(page);
+ VM_BUG_ON(folio_test_locked(folio));
+ VM_BUG_ON(folio_test_swapbacked(folio));
+ __folio_set_locked(folio);
+ __folio_set_swapbacked(folio);
+ __folio_mark_uptodate(folio);
ret = -EFAULT;
max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
if (unlikely(pgoff >= max_off))
goto out_release;
- folio = page_folio(page);
ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL,
gfp & GFP_RECLAIM_MASK, dst_mm);
if (ret)
goto out_release;
ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
- page, true, wp_copy);
+ &folio->page, true, wp_copy);
if (ret)
goto out_delete_from_cache;
@@ -2473,13 +2474,13 @@ int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
shmem_recalc_inode(inode);
spin_unlock_irq(&info->lock);
- unlock_page(page);
+ folio_unlock(folio);
return 0;
out_delete_from_cache:
- delete_from_page_cache(page);
+ filemap_remove_folio(folio);
out_release:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
out_unacct_blocks:
shmem_inode_unacct_blocks(inode, 1);
return ret;
@@ -2498,6 +2499,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
struct inode *inode = mapping->host;
struct shmem_inode_info *info = SHMEM_I(inode);
pgoff_t index = pos >> PAGE_SHIFT;
+ struct folio *folio;
int ret = 0;
/* i_rwsem is held by caller */
@@ -2509,14 +2511,15 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
return -EPERM;
}
- ret = shmem_getpage(inode, index, pagep, SGP_WRITE);
+ ret = shmem_get_folio(inode, index, &folio, SGP_WRITE);
if (ret)
return ret;
+ *pagep = folio_file_page(folio, index);
if (PageHWPoison(*pagep)) {
- unlock_page(*pagep);
- put_page(*pagep);
+ folio_unlock(folio);
+ folio_put(folio);
*pagep = NULL;
return -EIO;
}
@@ -2575,6 +2578,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
offset = *ppos & ~PAGE_MASK;
for (;;) {
+ struct folio *folio = NULL;
struct page *page = NULL;
pgoff_t end_index;
unsigned long nr, ret;
@@ -2589,17 +2593,18 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
break;
}
- error = shmem_getpage(inode, index, &page, SGP_READ);
+ error = shmem_get_folio(inode, index, &folio, SGP_READ);
if (error) {
if (error == -EINVAL)
error = 0;
break;
}
- if (page) {
- unlock_page(page);
+ if (folio) {
+ folio_unlock(folio);
+ page = folio_file_page(folio, index);
if (PageHWPoison(page)) {
- put_page(page);
+ folio_put(folio);
error = -EIO;
break;
}
@@ -2615,14 +2620,14 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (index == end_index) {
nr = i_size & ~PAGE_MASK;
if (nr <= offset) {
- if (page)
- put_page(page);
+ if (folio)
+ folio_put(folio);
break;
}
}
nr -= offset;
- if (page) {
+ if (folio) {
/*
* If users can be writing to this page using arbitrary
* virtual addresses, take care about potential aliasing
@@ -2634,13 +2639,13 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
* Mark the page accessed if we read the beginning.
*/
if (!offset)
- mark_page_accessed(page);
+ folio_mark_accessed(folio);
/*
* Ok, we have the page, and it's up-to-date, so
* now we can copy it to user space...
*/
ret = copy_page_to_iter(page, offset, nr, to);
- put_page(page);
+ folio_put(folio);
} else if (user_backed_iter(to)) {
/*
@@ -2783,7 +2788,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
info->fallocend = end;
for (index = start; index < end; ) {
- struct page *page;
+ struct folio *folio;
/*
* Good, the fallocate(2) manpage permits EINTR: we may have
@@ -2794,10 +2799,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
error = -ENOMEM;
else
- error = shmem_getpage(inode, index, &page, SGP_FALLOC);
+ error = shmem_get_folio(inode, index, &folio,
+ SGP_FALLOC);
if (error) {
info->fallocend = undo_fallocend;
- /* Remove the !PageUptodate pages we added */
+ /* Remove the !uptodate folios we added */
if (index > start) {
shmem_undo_range(inode,
(loff_t)start << PAGE_SHIFT,
@@ -2806,37 +2812,34 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
goto undone;
}
- index++;
/*
* Here is a more important optimization than it appears:
- * a second SGP_FALLOC on the same huge page will clear it,
- * making it PageUptodate and un-undoable if we fail later.
+ * a second SGP_FALLOC on the same large folio will clear it,
+ * making it uptodate and un-undoable if we fail later.
*/
- if (PageTransCompound(page)) {
- index = round_up(index, HPAGE_PMD_NR);
- /* Beware 32-bit wraparound */
- if (!index)
- index--;
- }
+ index = folio_next_index(folio);
+ /* Beware 32-bit wraparound */
+ if (!index)
+ index--;
/*
* Inform shmem_writepage() how far we have reached.
* No need for lock or barrier: we have the page lock.
*/
- if (!PageUptodate(page))
+ if (!folio_test_uptodate(folio))
shmem_falloc.nr_falloced += index - shmem_falloc.next;
shmem_falloc.next = index;
/*
- * If !PageUptodate, leave it that way so that freeable pages
+ * If !uptodate, leave it that way so that freeable folios
* can be recognized if we need to rollback on error later.
- * But set_page_dirty so that memory pressure will swap rather
- * than free the pages we are allocating (and SGP_CACHE pages
+ * But mark it dirty so that memory pressure will swap rather
+ * than free the folios we are allocating (and SGP_CACHE folios
* might still be clean: we now need to mark those dirty too).
*/
- set_page_dirty(page);
- unlock_page(page);
- put_page(page);
+ folio_mark_dirty(folio);
+ folio_unlock(folio);
+ folio_put(folio);
cond_resched();
}
@@ -2901,6 +2904,7 @@ shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir,
error = 0;
dir->i_size += BOGO_DIRENT_SIZE;
dir->i_ctime = dir->i_mtime = current_time(dir);
+ inode_inc_iversion(dir);
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
}
@@ -2976,6 +2980,7 @@ static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentr
dir->i_size += BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
+ inode_inc_iversion(dir);
inc_nlink(inode);
ihold(inode); /* New dentry reference */
dget(dentry); /* Extra pinning count for the created dentry */
@@ -2993,6 +2998,7 @@ static int shmem_unlink(struct inode *dir, struct dentry *dentry)
dir->i_size -= BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
+ inode_inc_iversion(dir);
drop_nlink(inode);
dput(dentry); /* Undo the count from "create" - this does all the work */
return 0;
@@ -3082,6 +3088,8 @@ static int shmem_rename2(struct user_namespace *mnt_userns,
old_dir->i_ctime = old_dir->i_mtime =
new_dir->i_ctime = new_dir->i_mtime =
inode->i_ctime = current_time(old_dir);
+ inode_inc_iversion(old_dir);
+ inode_inc_iversion(new_dir);
return 0;
}
@@ -3091,7 +3099,7 @@ static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
int error;
int len;
struct inode *inode;
- struct page *page;
+ struct folio *folio;
len = strlen(symname) + 1;
if (len > PAGE_SIZE)
@@ -3119,21 +3127,22 @@ static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
inode->i_op = &shmem_short_symlink_operations;
} else {
inode_nohighmem(inode);
- error = shmem_getpage(inode, 0, &page, SGP_WRITE);
+ error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
if (error) {
iput(inode);
return error;
}
inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_symlink_inode_operations;
- memcpy(page_address(page), symname, len);
- SetPageUptodate(page);
- set_page_dirty(page);
- unlock_page(page);
- put_page(page);
+ memcpy(folio_address(folio), symname, len);
+ folio_mark_uptodate(folio);
+ folio_mark_dirty(folio);
+ folio_unlock(folio);
+ folio_put(folio);
}
dir->i_size += BOGO_DIRENT_SIZE;
dir->i_ctime = dir->i_mtime = current_time(dir);
+ inode_inc_iversion(dir);
d_instantiate(dentry, inode);
dget(dentry);
return 0;
@@ -3141,40 +3150,41 @@ static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir,
static void shmem_put_link(void *arg)
{
- mark_page_accessed(arg);
- put_page(arg);
+ folio_mark_accessed(arg);
+ folio_put(arg);
}
static const char *shmem_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- struct page *page = NULL;
+ struct folio *folio = NULL;
int error;
+
if (!dentry) {
- page = find_get_page(inode->i_mapping, 0);
- if (!page)
+ folio = filemap_get_folio(inode->i_mapping, 0);
+ if (!folio)
return ERR_PTR(-ECHILD);
- if (PageHWPoison(page) ||
- !PageUptodate(page)) {
- put_page(page);
+ if (PageHWPoison(folio_page(folio, 0)) ||
+ !folio_test_uptodate(folio)) {
+ folio_put(folio);
return ERR_PTR(-ECHILD);
}
} else {
- error = shmem_getpage(inode, 0, &page, SGP_READ);
+ error = shmem_get_folio(inode, 0, &folio, SGP_READ);
if (error)
return ERR_PTR(error);
- if (!page)
+ if (!folio)
return ERR_PTR(-ECHILD);
- if (PageHWPoison(page)) {
- unlock_page(page);
- put_page(page);
+ if (PageHWPoison(folio_page(folio, 0))) {
+ folio_unlock(folio);
+ folio_put(folio);
return ERR_PTR(-ECHILD);
}
- unlock_page(page);
+ folio_unlock(folio);
}
- set_delayed_call(done, shmem_put_link, page);
- return page_address(page);
+ set_delayed_call(done, shmem_put_link, folio);
+ return folio_address(folio);
}
#ifdef CONFIG_TMPFS_XATTR
@@ -3204,6 +3214,7 @@ static int shmem_fileattr_set(struct user_namespace *mnt_userns,
shmem_set_inode_flags(inode, info->fsflags);
inode->i_ctime = current_time(inode);
+ inode_inc_iversion(inode);
return 0;
}
@@ -3267,9 +3278,15 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
size_t size, int flags)
{
struct shmem_inode_info *info = SHMEM_I(inode);
+ int err;
name = xattr_full_name(handler, name);
- return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
+ err = simple_xattr_set(&info->xattrs, name, value, size, flags, NULL);
+ if (!err) {
+ inode->i_ctime = current_time(inode);
+ inode_inc_iversion(inode);
+ }
+ return err;
}
static const struct xattr_handler shmem_security_xattr_handler = {
@@ -3732,7 +3749,7 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_flags |= SB_NOUSER;
}
sb->s_export_op = &shmem_export_ops;
- sb->s_flags |= SB_NOSEC;
+ sb->s_flags |= SB_NOSEC | SB_I_VERSION;
#else
sb->s_flags |= SB_NOUSER;
#endif
@@ -4266,18 +4283,20 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
{
#ifdef CONFIG_SHMEM
struct inode *inode = mapping->host;
+ struct folio *folio;
struct page *page;
int error;
BUG_ON(!shmem_mapping(mapping));
- error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
+ error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
gfp, NULL, NULL, NULL);
if (error)
return ERR_PTR(error);
- unlock_page(page);
+ folio_unlock(folio);
+ page = folio_file_page(folio, index);
if (PageHWPoison(page)) {
- put_page(page);
+ folio_put(folio);
return ERR_PTR(-EIO);
}
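
The shmem.c conversion retires shmem_getpage()/shmem_getpage_gfp() in favour of shmem_get_folio()/shmem_get_folio_gfp(), with callers that still need a struct page deriving it via folio_file_page(). Condensed from the shmem_write_begin() and shmem_file_read_iter() hunks above, the resulting caller pattern looks roughly like this (schematic only, error paths trimmed, not a standalone compilable unit):

	struct folio *folio = NULL;
	struct page *page;
	int err;

	err = shmem_get_folio(inode, index, &folio, SGP_READ);
	if (err)
		return err;
	if (!folio)
		return 0;	/* SGP_READ succeeds on a hole with a NULL folio */

	page = folio_file_page(folio, index);	/* precise subpage, if one is still needed */
	folio_unlock(folio);
	/* ... use the page/folio contents ... */
	folio_put(folio);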
diff --git a/mm/shuffle.c b/mm/shuffle.c
index c13c33b247e8..fb1393b8b3a9 100644
--- a/mm/shuffle.c
+++ b/mm/shuffle.c
@@ -12,23 +12,22 @@
DEFINE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
static bool shuffle_param;
-static int shuffle_show(char *buffer, const struct kernel_param *kp)
-{
- return sprintf(buffer, "%c\n", shuffle_param ? 'Y' : 'N');
-}
-static __meminit int shuffle_store(const char *val,
+static __meminit int shuffle_param_set(const char *val,
const struct kernel_param *kp)
{
- int rc = param_set_bool(val, kp);
-
- if (rc < 0)
- return rc;
- if (shuffle_param)
+ if (param_set_bool(val, kp))
+ return -EINVAL;
+ if (*(bool *)kp->arg)
static_branch_enable(&page_alloc_shuffle_key);
return 0;
}
-module_param_call(shuffle, shuffle_store, shuffle_show, &shuffle_param, 0400);
+
+static const struct kernel_param_ops shuffle_param_ops = {
+ .set = shuffle_param_set,
+ .get = param_get_bool,
+};
+module_param_cb(shuffle, &shuffle_param_ops, &shuffle_param, 0400);
/*
* For two pages to be swapped in the shuffle, they must be free (on a
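
The shuffle.c hunk replaces module_param_call() with module_param_cb() and an explicit kernel_param_ops, keeping a custom setter while reusing the stock param_get_bool getter. The general shape, an ops structure pairing a bespoke set routine with a generic get routine, can be sketched in plain C (hypothetical names, not the kernel's parameter machinery):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct example_param_ops {
	int (*set)(const char *val, void *arg);
	int (*get)(char *buf, const void *arg);
};

static bool shuffle_enabled;

/* Custom setter: parse the value, then flip a one-way switch if it is set. */
static int shuffle_set(const char *val, void *arg)
{
	bool *p = arg;

	if (!strcmp(val, "1") || !strcmp(val, "Y"))
		*p = true;
	else if (!strcmp(val, "0") || !strcmp(val, "N"))
		*p = false;
	else
		return -1;

	if (*p)
		puts("enabling shuffle key");	/* stands in for static_branch_enable() */
	return 0;
}

/* Generic getter, analogous to param_get_bool(). */
static int example_get_bool(char *buf, const void *arg)
{
	return sprintf(buf, "%c\n", *(const bool *)arg ? 'Y' : 'N');
}

static const struct example_param_ops shuffle_ops = {
	.set = shuffle_set,
	.get = example_get_bool,
};

int main(void)
{
	char buf[4];

	shuffle_ops.set("Y", &shuffle_enabled);
	shuffle_ops.get(buf, &shuffle_enabled);
	fputs(buf, stdout);
	return 0;
}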
diff --git a/mm/slab.h b/mm/slab.h
index 65023f000d42..0202a8c2f0d2 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -739,6 +739,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
memset(p[i], 0, s->object_size);
kmemleak_alloc_recursive(p[i], s->object_size, 1,
s->flags, flags);
+ kmsan_slab_alloc(s, p[i], flags);
}
memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 9ad97ae73a0a..33b1886b06eb 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -925,6 +925,7 @@ void free_large_kmalloc(struct folio *folio, void *object)
kmemleak_free(object);
kasan_kfree_large(object);
+ kmsan_kfree_large(object);
mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
-(PAGE_SIZE << order));
@@ -1104,6 +1105,7 @@ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
ptr = kasan_kmalloc_large(ptr, size, flags);
/* As ptr might get tagged, call kmemleak hook after KASAN. */
kmemleak_alloc(ptr, size, 1, flags);
+ kmsan_kmalloc_large(ptr, size, flags);
return ptr;
}
diff --git a/mm/slub.c b/mm/slub.c
index 2a6b3f31ce7e..96dd392d7f99 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -22,6 +22,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
+#include <linux/kmsan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
@@ -385,6 +386,17 @@ static void prefetch_freepointer(const struct kmem_cache *s, void *object)
prefetchw(object + s->offset);
}
+/*
+ * When running under KMSAN, get_freepointer_safe() may return an uninitialized
+ * pointer value in the case the current thread loses the race for the next
+ * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
+ * slab_alloc_node() will fail, so the uninitialized value won't be used, but
+ * KMSAN will still check all arguments of cmpxchg because of imperfect
+ * handling of inline assembly.
+ * To work around this problem, we apply __no_kmsan_checks to ensure that
+ * get_freepointer_safe() returns initialized memory.
+ */
+__no_kmsan_checks
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
unsigned long freepointer_addr;
@@ -1679,6 +1691,7 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
void *x, bool init)
{
kmemleak_free_recursive(x, s->flags);
+ kmsan_slab_free(s, x);
debug_check_no_locks_freed(x, s->object_size);
@@ -5701,6 +5714,29 @@ STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
#endif /* CONFIG_SLUB_STATS */
+#ifdef CONFIG_KFENCE
+static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
+}
+
+static ssize_t skip_kfence_store(struct kmem_cache *s,
+ const char *buf, size_t length)
+{
+ int ret = length;
+
+ if (buf[0] == '0')
+ s->flags &= ~SLAB_SKIP_KFENCE;
+ else if (buf[0] == '1')
+ s->flags |= SLAB_SKIP_KFENCE;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+SLAB_ATTR(skip_kfence);
+#endif
+
static struct attribute *slab_attrs[] = {
&slab_size_attr.attr,
&object_size_attr.attr,
@@ -5768,6 +5804,9 @@ static struct attribute *slab_attrs[] = {
&failslab_attr.attr,
#endif
&usersize_attr.attr,
+#ifdef CONFIG_KFENCE
+ &skip_kfence_attr.attr,
+#endif
NULL
};
@@ -5870,6 +5909,7 @@ static char *create_unique_id(struct kmem_cache *s)
kfree(name);
return ERR_PTR(-EINVAL);
}
+ kmsan_unpoison_memory(name, p - name);
return name;
}
@@ -5973,6 +6013,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
al->name = name;
al->next = alias_list;
alias_list = al;
+ kmsan_unpoison_memory(al, sizeof(*al));
return 0;
}
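
The slub.c changes add KMSAN hooks on the allocation and free paths and, under CONFIG_KFENCE, a per-cache skip_kfence attribute whose store accepts '0' or '1'. Assuming the usual SLUB sysfs layout under /sys/kernel/slab/ and a cache name chosen only for illustration, a small userspace snippet toggling the new knob might look like this:

#include <stdio.h>

int main(void)
{
	/* Cache name is a placeholder; any cache directory under
	 * /sys/kernel/slab/ exposing skip_kfence would do. */
	const char *path = "/sys/kernel/slab/kmalloc-64/skip_kfence";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1", f);	/* the store looks at buf[0]: '1' sets SLAB_SKIP_KFENCE */
	fclose(f);
	return 0;
}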
diff --git a/mm/swap.c b/mm/swap.c
index 9cee7f6a3809..955930f41d20 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -366,7 +366,7 @@ static void folio_activate_drain(int cpu)
folio_batch_move_lru(fbatch, folio_activate_fn);
}
-static void folio_activate(struct folio *folio)
+void folio_activate(struct folio *folio)
{
if (folio_test_lru(folio) && !folio_test_active(folio) &&
!folio_test_unevictable(folio)) {
@@ -385,7 +385,7 @@ static inline void folio_activate_drain(int cpu)
{
}
-static void folio_activate(struct folio *folio)
+void folio_activate(struct folio *folio)
{
struct lruvec *lruvec;
@@ -428,6 +428,40 @@ static void __lru_cache_activate_folio(struct folio *folio)
local_unlock(&cpu_fbatches.lock);
}
+#ifdef CONFIG_LRU_GEN
+static void folio_inc_refs(struct folio *folio)
+{
+ unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+
+ if (folio_test_unevictable(folio))
+ return;
+
+ if (!folio_test_referenced(folio)) {
+ folio_set_referenced(folio);
+ return;
+ }
+
+ if (!folio_test_workingset(folio)) {
+ folio_set_workingset(folio);
+ return;
+ }
+
+ /* see the comment on MAX_NR_TIERS */
+ do {
+ new_flags = old_flags & LRU_REFS_MASK;
+ if (new_flags == LRU_REFS_MASK)
+ break;
+
+ new_flags += BIT(LRU_REFS_PGOFF);
+ new_flags |= old_flags & ~LRU_REFS_MASK;
+ } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+}
+#else
+static void folio_inc_refs(struct folio *folio)
+{
+}
+#endif /* CONFIG_LRU_GEN */
+
/*
* Mark a page as having seen activity.
*
@@ -440,6 +474,11 @@ static void __lru_cache_activate_folio(struct folio *folio)
*/
void folio_mark_accessed(struct folio *folio)
{
+ if (lru_gen_enabled()) {
+ folio_inc_refs(folio);
+ return;
+ }
+
if (!folio_test_referenced(folio)) {
folio_set_referenced(folio);
} else if (folio_test_unevictable(folio)) {
@@ -484,6 +523,11 @@ void folio_add_lru(struct folio *folio)
folio_test_unevictable(folio), folio);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
+ /* see the comment in lru_gen_add_folio() */
+ if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
+ lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
+ folio_set_active(folio);
+
folio_get(folio);
local_lock(&cpu_fbatches.lock);
fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
@@ -493,22 +537,21 @@ void folio_add_lru(struct folio *folio)
EXPORT_SYMBOL(folio_add_lru);
/**
- * lru_cache_add_inactive_or_unevictable
- * @page: the page to be added to LRU
- * @vma: vma in which page is mapped for determining reclaimability
+ * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
+ * @folio: The folio to be added to the LRU.
+ * @vma: VMA in which the folio is mapped.
*
- * Place @page on the inactive or unevictable LRU list, depending on its
- * evictability.
+ * If the VMA is mlocked, @folio is added to the unevictable list.
+ * Otherwise, it is treated the same way as folio_add_lru().
*/
-void lru_cache_add_inactive_or_unevictable(struct page *page,
- struct vm_area_struct *vma)
+void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
{
- VM_BUG_ON_PAGE(PageLRU(page), page);
+ VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
- mlock_new_page(page);
+ mlock_new_page(&folio->page);
else
- lru_cache_add(page);
+ folio_add_lru(folio);
}
/*
@@ -575,7 +618,7 @@ static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
{
- if (folio_test_active(folio) && !folio_test_unevictable(folio)) {
+ if (!folio_test_unevictable(folio) && (folio_test_active(folio) || lru_gen_enabled())) {
long nr_pages = folio_nr_pages(folio);
lruvec_del_folio(lruvec, folio);
@@ -688,8 +731,8 @@ void deactivate_page(struct page *page)
{
struct folio *folio = page_folio(page);
- if (folio_test_lru(folio) && folio_test_active(folio) &&
- !folio_test_unevictable(folio)) {
+ if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
+ (folio_test_active(folio) || lru_gen_enabled())) {
struct folio_batch *fbatch;
folio_get(folio);
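
Earlier in this mm/swap.c diff, folio_inc_refs() bumps a small counter packed into folio->flags using a saturating try_cmpxchg() retry loop. A standalone userspace analogue of that lock-free saturating increment, written with C11 atomics and an illustrative bit layout rather than the kernel's LRU_REFS_MASK:

#include <stdatomic.h>
#include <stdio.h>

#define REFS_MASK 0x3UL		/* pretend the counter lives in the low two bits */

static _Atomic unsigned long flags;

static void inc_refs_saturating(void)
{
	unsigned long old_flags = atomic_load(&flags);
	unsigned long new_flags;

	do {
		if ((old_flags & REFS_MASK) == REFS_MASK)
			return;			/* already saturated, nothing to do */
		new_flags = old_flags + 1;	/* counter occupies the low bits */
	} while (!atomic_compare_exchange_weak(&flags, &old_flags, new_flags));
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		inc_refs_saturating();
	printf("refs = %lu\n", atomic_load(&flags) & REFS_MASK);
	return 0;
}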
diff --git a/mm/swap.h b/mm/swap.h
index 17936e068c1c..cc08c459c619 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -18,9 +18,7 @@ static inline void swap_read_unplug(struct swap_iocb *plug)
}
void swap_write_unplug(struct swap_iocb *sio);
int swap_writepage(struct page *page, struct writeback_control *wbc);
-void end_swap_bio_write(struct bio *bio);
-int __swap_writepage(struct page *page, struct writeback_control *wbc,
- bio_end_io_t end_write_func);
+int __swap_writepage(struct page *page, struct writeback_control *wbc);
/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
@@ -34,16 +32,15 @@ extern struct address_space *swapper_spaces[];
void show_swap_cache_info(void);
bool add_to_swap(struct folio *folio);
void *get_shadow_from_swap_cache(swp_entry_t entry);
-int add_to_swap_cache(struct page *page, swp_entry_t entry,
+int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
gfp_t gfp, void **shadowp);
void __delete_from_swap_cache(struct folio *folio,
swp_entry_t entry, void *shadow);
void delete_from_swap_cache(struct folio *folio);
void clear_shadow_from_swap_cache(int type, unsigned long begin,
unsigned long end);
-struct page *lookup_swap_cache(swp_entry_t entry,
- struct vm_area_struct *vma,
- unsigned long addr);
+struct folio *swap_cache_get_folio(swp_entry_t entry,
+ struct vm_area_struct *vma, unsigned long addr);
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
@@ -101,9 +98,8 @@ static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
return 0;
}
-static inline struct page *lookup_swap_cache(swp_entry_t swp,
- struct vm_area_struct *vma,
- unsigned long addr)
+static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
+ struct vm_area_struct *vma, unsigned long addr)
{
return NULL;
}
@@ -124,7 +120,7 @@ static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
return NULL;
}
-static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
+static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
gfp_t gfp_mask, void **shadowp)
{
return -1;
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 5a9442979a18..db6c4a26cf59 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -170,6 +170,9 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
unsigned long length;
struct swap_cgroup_ctrl *ctrl;
+ if (mem_cgroup_disabled())
+ return 0;
+
length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
array = vcalloc(length, sizeof(void *));
@@ -204,6 +207,9 @@ void swap_cgroup_swapoff(int type)
unsigned long i, length;
struct swap_cgroup_ctrl *ctrl;
+ if (mem_cgroup_disabled())
+ return;
+
mutex_lock(&swap_cgroup_mutex);
ctrl = &swap_cgroup_ctrl[type];
map = ctrl->map;
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 10b94d64cc25..0bec1f705f8e 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -343,7 +343,7 @@ repeat:
get_swap_pages(1, &entry, 1);
out:
if (mem_cgroup_try_charge_swap(folio, entry)) {
- put_swap_page(&folio->page, entry);
+ put_swap_folio(folio, entry);
entry.val = 0;
}
return entry;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 41afa6d45b23..438d0676c5be 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -85,21 +85,21 @@ void *get_shadow_from_swap_cache(swp_entry_t entry)
* add_to_swap_cache resembles filemap_add_folio on swapper_space,
* but sets SwapCache flag and private instead of mapping and index.
*/
-int add_to_swap_cache(struct page *page, swp_entry_t entry,
+int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
gfp_t gfp, void **shadowp)
{
struct address_space *address_space = swap_address_space(entry);
pgoff_t idx = swp_offset(entry);
- XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
- unsigned long i, nr = thp_nr_pages(page);
+ XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
+ unsigned long i, nr = folio_nr_pages(folio);
void *old;
- VM_BUG_ON_PAGE(!PageLocked(page), page);
- VM_BUG_ON_PAGE(PageSwapCache(page), page);
- VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
+ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
+ VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
- page_ref_add(page, nr);
- SetPageSwapCache(page);
+ folio_ref_add(folio, nr);
+ folio_set_swapcache(folio);
do {
xas_lock_irq(&xas);
@@ -107,19 +107,19 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
if (xas_error(&xas))
goto unlock;
for (i = 0; i < nr; i++) {
- VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
+ VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
old = xas_load(&xas);
if (xa_is_value(old)) {
if (shadowp)
*shadowp = old;
}
- set_page_private(page + i, entry.val + i);
- xas_store(&xas, page);
+ set_page_private(folio_page(folio, i), entry.val + i);
+ xas_store(&xas, folio);
xas_next(&xas);
}
address_space->nrpages += nr;
- __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
- __mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
+ __node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+ __lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
xas_unlock_irq(&xas);
} while (xas_nomem(&xas, gfp));
@@ -127,8 +127,8 @@ unlock:
if (!xas_error(&xas))
return 0;
- ClearPageSwapCache(page);
- page_ref_sub(page, nr);
+ folio_clear_swapcache(folio);
+ folio_ref_sub(folio, nr);
return xas_error(&xas);
}
@@ -194,7 +194,7 @@ bool add_to_swap(struct folio *folio)
/*
* Add it to the swap cache.
*/
- err = add_to_swap_cache(&folio->page, entry,
+ err = add_to_swap_cache(folio, entry,
__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
if (err)
/*
@@ -218,7 +218,7 @@ bool add_to_swap(struct folio *folio)
return true;
fail:
- put_swap_page(&folio->page, entry);
+ put_swap_folio(folio, entry);
return false;
}
@@ -237,7 +237,7 @@ void delete_from_swap_cache(struct folio *folio)
__delete_from_swap_cache(folio, entry, NULL);
xa_unlock_irq(&address_space->i_pages);
- put_swap_page(&folio->page, entry);
+ put_swap_folio(folio, entry);
folio_ref_sub(folio, folio_nr_pages(folio));
}
@@ -272,16 +272,19 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
/*
* If we are the only user, then try to free up the swap cache.
*
- * Its ok to check for PageSwapCache without the page lock
+ * It's ok to check the swapcache flag without the folio lock
* here because we are going to recheck again inside
- * try_to_free_swap() _with_ the lock.
+ * folio_free_swap() _with_ the lock.
* - Marcelo
*/
void free_swap_cache(struct page *page)
{
- if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
- try_to_free_swap(page);
- unlock_page(page);
+ struct folio *folio = page_folio(page);
+
+ if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
+ folio_trylock(folio)) {
+ folio_free_swap(folio);
+ folio_unlock(folio);
}
}
@@ -317,24 +320,24 @@ static inline bool swap_use_vma_readahead(void)
}
/*
- * Lookup a swap entry in the swap cache. A found page will be returned
+ * Lookup a swap entry in the swap cache. A found folio will be returned
* unlocked and with its refcount incremented - we rely on the kernel
- * lock getting page table operations atomic even if we drop the page
+ * lock getting page table operations atomic even if we drop the folio
* lock before returning.
*/
-struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
- unsigned long addr)
+struct folio *swap_cache_get_folio(swp_entry_t entry,
+ struct vm_area_struct *vma, unsigned long addr)
{
- struct page *page;
+ struct folio *folio;
struct swap_info_struct *si;
si = get_swap_device(entry);
if (!si)
return NULL;
- page = find_get_page(swap_address_space(entry), swp_offset(entry));
+ folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
put_swap_device(si);
- if (page) {
+ if (folio) {
bool vma_ra = swap_use_vma_readahead();
bool readahead;
@@ -342,10 +345,10 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
* At the moment, we don't support PG_readahead for anon THP
* so let's bail out rather than confusing the readahead stat.
*/
- if (unlikely(PageTransCompound(page)))
- return page;
+ if (unlikely(folio_test_large(folio)))
+ return folio;
- readahead = TestClearPageReadahead(page);
+ readahead = folio_test_clear_readahead(folio);
if (vma && vma_ra) {
unsigned long ra_val;
int win, hits;
@@ -366,7 +369,7 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
}
}
- return page;
+ return folio;
}
/**
@@ -411,7 +414,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
bool *new_page_allocated)
{
struct swap_info_struct *si;
- struct page *page;
+ struct folio *folio;
void *shadow = NULL;
*new_page_allocated = false;
@@ -420,17 +423,17 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
int err;
/*
* First check the swap cache. Since this is normally
- * called after lookup_swap_cache() failed, re-calling
+ * called after swap_cache_get_folio() failed, re-calling
* that would confuse statistics.
*/
si = get_swap_device(entry);
if (!si)
return NULL;
- page = find_get_page(swap_address_space(entry),
- swp_offset(entry));
+ folio = filemap_get_folio(swap_address_space(entry),
+ swp_offset(entry));
put_swap_device(si);
- if (page)
- return page;
+ if (folio)
+ return folio_file_page(folio, swp_offset(entry));
/*
* Just skip read ahead for unused swap slot.
@@ -448,8 +451,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
* cause any racers to loop around until we add it to cache.
*/
- page = alloc_page_vma(gfp_mask, vma, addr);
- if (!page)
+ folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
+ if (!folio)
return NULL;
/*
@@ -459,7 +462,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
if (!err)
break;
- put_page(page);
+ folio_put(folio);
if (err != -EEXIST)
return NULL;
@@ -477,30 +480,30 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* The swap entry is ours to swap in. Prepare the new page.
*/
- __SetPageLocked(page);
- __SetPageSwapBacked(page);
+ __folio_set_locked(folio);
+ __folio_set_swapbacked(folio);
- if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))
+ if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
goto fail_unlock;
/* May fail (-ENOMEM) if XArray node allocation failed. */
- if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
+ if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
goto fail_unlock;
mem_cgroup_swapin_uncharge_swap(entry);
if (shadow)
- workingset_refault(page_folio(page), shadow);
+ workingset_refault(folio, shadow);
- /* Caller will initiate read into locked page */
- lru_cache_add(page);
+ /* Caller will initiate read into locked folio */
+ folio_add_lru(folio);
*new_page_allocated = true;
- return page;
+ return &folio->page;
fail_unlock:
- put_swap_page(page, entry);
- unlock_page(page);
- put_page(page);
+ put_swap_folio(folio, entry);
+ folio_unlock(folio);
+ folio_put(folio);
return NULL;
}
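
The mm/swap_state.c hunks above replace the page-based swap cache lookup with a folio-based one. As a quick reference, here is a minimal caller sketch (illustrative only, not part of the patch; the example_* name and the simplified flow are made up for this note) that uses only interfaces introduced or shown in the hunks above:

/* Illustrative sketch, not part of the patch: look up a swap entry,
 * fall back to readahead, then drop its swap space if possible.
 */
static void example_swapin(swp_entry_t entry, struct vm_area_struct *vma,
			   unsigned long addr, struct vm_fault *vmf)
{
	struct folio *folio = swap_cache_get_folio(entry, vma, addr);

	if (!folio) {
		struct page *page = swapin_readahead(entry,
						     GFP_HIGHUSER_MOVABLE, vmf);
		if (page)
			folio = page_folio(page);
	}
	if (!folio)
		return;

	folio_lock(folio);
	folio_wait_writeback(folio);
	/* ... use the folio ... */
	folio_free_swap(folio);		/* replaces try_to_free_swap(&folio->page) */
	folio_unlock(folio);
	folio_put(folio);
}

swap_cache_get_folio() keeps the readahead-statistics handling that lookup_swap_cache() had, so callers only have to convert their page calls to folio calls.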
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 82e62007881d..5fc1237a9f21 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -63,6 +63,10 @@ EXPORT_SYMBOL_GPL(nr_swap_pages);
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority = -1;
+unsigned long swapfile_maximum_size;
+#ifdef CONFIG_MIGRATION
+bool swap_migration_ad_supported;
+#endif /* CONFIG_MIGRATION */
static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
@@ -128,27 +132,27 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si,
unsigned long offset, unsigned long flags)
{
swp_entry_t entry = swp_entry(si->type, offset);
- struct page *page;
+ struct folio *folio;
int ret = 0;
- page = find_get_page(swap_address_space(entry), offset);
- if (!page)
+ folio = filemap_get_folio(swap_address_space(entry), offset);
+ if (!folio)
return 0;
/*
* When this function is called from scan_swap_map_slots() and it's
- * called by vmscan.c at reclaiming pages. So, we hold a lock on a page,
+ * called by vmscan.c while reclaiming folios, we hold a folio lock
* here. We have to use trylock for avoiding deadlock. This is a special
- * case and you should use try_to_free_swap() with explicit lock_page()
+ * case and you should use folio_free_swap() with explicit folio_lock()
* in usual operations.
*/
- if (trylock_page(page)) {
+ if (folio_trylock(folio)) {
if ((flags & TTRS_ANYWAY) ||
- ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
- ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
- ret = try_to_free_swap(page);
- unlock_page(page);
+ ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
+ ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)))
+ ret = folio_free_swap(folio);
+ folio_unlock(folio);
}
- put_page(page);
+ folio_put(folio);
return ret;
}
@@ -1328,7 +1332,7 @@ void swap_free(swp_entry_t entry)
/*
* Called after dropping swapcache to decrease refcnt to swap entries.
*/
-void put_swap_page(struct page *page, swp_entry_t entry)
+void put_swap_folio(struct folio *folio, swp_entry_t entry)
{
unsigned long offset = swp_offset(entry);
unsigned long idx = offset / SWAPFILE_CLUSTER;
@@ -1337,7 +1341,7 @@ void put_swap_page(struct page *page, swp_entry_t entry)
unsigned char *map;
unsigned int i, free_entries = 0;
unsigned char val;
- int size = swap_entry_size(thp_nr_pages(page));
+ int size = swap_entry_size(folio_nr_pages(folio));
si = _swap_info_get(entry);
if (!si)
@@ -1427,30 +1431,6 @@ void swapcache_free_entries(swp_entry_t *entries, int n)
spin_unlock(&p->lock);
}
-/*
- * How many references to page are currently swapped out?
- * This does not give an exact answer when swap count is continued,
- * but does include the high COUNT_CONTINUED flag to allow for that.
- */
-static int page_swapcount(struct page *page)
-{
- int count = 0;
- struct swap_info_struct *p;
- struct swap_cluster_info *ci;
- swp_entry_t entry;
- unsigned long offset;
-
- entry.val = page_private(page);
- p = _swap_info_get(entry);
- if (p) {
- offset = swp_offset(entry);
- ci = lock_cluster_or_swap_info(p, offset);
- count = swap_count(p->swap_map[offset]);
- unlock_cluster_or_swap_info(p, ci);
- }
- return count;
-}
-
int __swap_count(swp_entry_t entry)
{
struct swap_info_struct *si;
@@ -1465,11 +1445,16 @@ int __swap_count(swp_entry_t entry)
return count;
}
+/*
+ * How many references to @entry are currently swapped out?
+ * This does not give an exact answer when swap count is continued,
+ * but does include the high COUNT_CONTINUED flag to allow for that.
+ */
static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
- int count = 0;
pgoff_t offset = swp_offset(entry);
struct swap_cluster_info *ci;
+ int count;
ci = lock_cluster_or_swap_info(si, offset);
count = swap_count(si->swap_map[offset]);
@@ -1570,56 +1555,59 @@ unlock_out:
static bool folio_swapped(struct folio *folio)
{
- swp_entry_t entry;
- struct swap_info_struct *si;
+ swp_entry_t entry = folio_swap_entry(folio);
+ struct swap_info_struct *si = _swap_info_get(entry);
+
+ if (!si)
+ return false;
if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
- return page_swapcount(&folio->page) != 0;
+ return swap_swapcount(si, entry) != 0;
- entry = folio_swap_entry(folio);
- si = _swap_info_get(entry);
- if (si)
- return swap_page_trans_huge_swapped(si, entry);
- return false;
+ return swap_page_trans_huge_swapped(si, entry);
}
-/*
- * If swap is getting full, or if there are no more mappings of this page,
- * then try_to_free_swap is called to free its swap space.
+/**
+ * folio_free_swap() - Free the swap space used for this folio.
+ * @folio: The folio to remove.
+ *
+ * If swap is getting full, or if there are no more mappings of this folio,
+ * then call folio_free_swap to free its swap space.
+ *
+ * Return: true if we were able to release the swap space.
*/
-int try_to_free_swap(struct page *page)
+bool folio_free_swap(struct folio *folio)
{
- struct folio *folio = page_folio(page);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
if (!folio_test_swapcache(folio))
- return 0;
+ return false;
if (folio_test_writeback(folio))
- return 0;
+ return false;
if (folio_swapped(folio))
- return 0;
+ return false;
/*
* Once hibernation has begun to create its image of memory,
- * there's a danger that one of the calls to try_to_free_swap()
+ * there's a danger that one of the calls to folio_free_swap()
* - most probably a call from __try_to_reclaim_swap() while
* hibernation is allocating its own swap pages for the image,
* but conceivably even a call from memory reclaim - will free
- * the swap from a page which has already been recorded in the
- * image as a clean swapcache page, and then reuse its swap for
+ * the swap from a folio which has already been recorded in the
+ * image as a clean swapcache folio, and then reuse its swap for
* another page of the image. On waking from hibernation, the
- * original page might be freed under memory pressure, then
+ * original folio might be freed under memory pressure, then
* later read back in from swap, now with the wrong data.
*
* Hibernation suspends storage while it is writing the image
* to disk so check that here.
*/
if (pm_suspended_storage())
- return 0;
+ return false;
delete_from_swap_cache(folio);
folio_set_dirty(folio);
- return 1;
+ return true;
}
/*
@@ -1770,8 +1758,9 @@ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
* force COW, vm_page_prot omits write permission from any private vma.
*/
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
- unsigned long addr, swp_entry_t entry, struct page *page)
+ unsigned long addr, swp_entry_t entry, struct folio *folio)
{
+ struct page *page = folio_file_page(folio, swp_offset(entry));
struct page *swapcache;
spinlock_t *ptl;
pte_t *pte, new_pte;
@@ -1843,17 +1832,18 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned int type)
{
- struct page *page;
swp_entry_t entry;
pte_t *pte;
struct swap_info_struct *si;
- unsigned long offset;
int ret = 0;
volatile unsigned char *swap_map;
si = swap_info[type];
pte = pte_offset_map(pmd, addr);
do {
+ struct folio *folio;
+ unsigned long offset;
+
if (!is_swap_pte(*pte))
continue;
@@ -1864,8 +1854,9 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
offset = swp_offset(entry);
pte_unmap(pte);
swap_map = &si->swap_map[offset];
- page = lookup_swap_cache(entry, vma, addr);
- if (!page) {
+ folio = swap_cache_get_folio(entry, vma, addr);
+ if (!folio) {
+ struct page *page;
struct vm_fault vmf = {
.vma = vma,
.address = addr,
@@ -1875,25 +1866,27 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
&vmf);
+ if (page)
+ folio = page_folio(page);
}
- if (!page) {
+ if (!folio) {
if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
goto try_next;
return -ENOMEM;
}
- lock_page(page);
- wait_on_page_writeback(page);
- ret = unuse_pte(vma, pmd, addr, entry, page);
+ folio_lock(folio);
+ folio_wait_writeback(folio);
+ ret = unuse_pte(vma, pmd, addr, entry, folio);
if (ret < 0) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
goto out;
}
- try_to_free_swap(page);
- unlock_page(page);
- put_page(page);
+ folio_free_swap(folio);
+ folio_unlock(folio);
+ folio_put(folio);
try_next:
pte = pte_offset_map(pmd, addr);
} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -1990,14 +1983,16 @@ static int unuse_mm(struct mm_struct *mm, unsigned int type)
{
struct vm_area_struct *vma;
int ret = 0;
+ VMA_ITERATOR(vmi, mm, 0);
mmap_read_lock(mm);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ for_each_vma(vmi, vma) {
if (vma->anon_vma) {
ret = unuse_vma(vma, type);
if (ret)
break;
}
+
cond_resched();
}
mmap_read_unlock(mm);
@@ -2042,7 +2037,7 @@ static int try_to_unuse(unsigned int type)
struct list_head *p;
int retval = 0;
struct swap_info_struct *si = swap_info[type];
- struct page *page;
+ struct folio *folio;
swp_entry_t entry;
unsigned int i;
@@ -2092,21 +2087,21 @@ retry:
(i = find_next_to_unuse(si, i)) != 0) {
entry = swp_entry(type, i);
- page = find_get_page(swap_address_space(entry), i);
- if (!page)
+ folio = filemap_get_folio(swap_address_space(entry), i);
+ if (!folio)
continue;
/*
- * It is conceivable that a racing task removed this page from
- * swap cache just before we acquired the page lock. The page
+ * It is conceivable that a racing task removed this folio from
+ * swap cache just before we acquired the page lock. The folio
* might even be back in swap cache on another swap area. But
- * that is okay, try_to_free_swap() only removes stale pages.
+ * that is okay, folio_free_swap() only removes stale folios.
*/
- lock_page(page);
- wait_on_page_writeback(page);
- try_to_free_swap(page);
- unlock_page(page);
- put_page(page);
+ folio_lock(folio);
+ folio_wait_writeback(folio);
+ folio_free_swap(folio);
+ folio_unlock(folio);
+ folio_put(folio);
}
/*
@@ -2816,7 +2811,7 @@ unsigned long generic_max_swapfile_size(void)
}
/* Can be overridden by an architecture for additional checks. */
-__weak unsigned long max_swapfile_size(void)
+__weak unsigned long arch_max_swapfile_size(void)
{
return generic_max_swapfile_size();
}
@@ -2856,7 +2851,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
p->cluster_next = 1;
p->cluster_nr = 0;
- maxpages = max_swapfile_size();
+ maxpages = swapfile_maximum_size;
last_page = swap_header->info.last_page;
if (!last_page) {
pr_warn("Empty swap-file\n");
@@ -3677,6 +3672,13 @@ static int __init swapfile_init(void)
for_each_node(nid)
plist_head_init(&swap_avail_heads[nid]);
+ swapfile_maximum_size = arch_max_swapfile_size();
+
+#ifdef CONFIG_MIGRATION
+ if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))
+ swap_migration_ad_supported = true;
+#endif /* CONFIG_MIGRATION */
+
return 0;
}
subsys_initcall(swapfile_init);
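
With try_to_free_swap() replaced by the bool-returning folio_free_swap(), callers that only hold an opportunistic reference use the trylock pattern that free_swap_cache() and __try_to_reclaim_swap() now share. A minimal sketch under the same assumptions (illustrative, not part of the patch; example_* is a made-up name):

/* Illustrative sketch, not part of the patch: opportunistically drop the
 * swap space backing a folio we do not already hold locked.
 */
static bool example_drop_swap(struct folio *folio)
{
	bool freed = false;

	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		freed = folio_free_swap(folio);	/* bool now, not int */
		folio_unlock(folio);
	}
	return freed;
}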
diff --git a/mm/truncate.c b/mm/truncate.c
index 0b0708bf935f..c0be77e5c008 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -240,7 +240,7 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
folio_invalidate(folio, offset, length);
if (!folio_test_large(folio))
return true;
- if (split_huge_page(&folio->page) == 0)
+ if (split_folio(folio) == 0)
return true;
if (folio_test_dirty(folio))
return false;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 7327b2573f7c..e24e8a47ce8a 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -243,20 +243,22 @@ static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
{
struct inode *inode = file_inode(dst_vma->vm_file);
pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
+ struct folio *folio;
struct page *page;
int ret;
- ret = shmem_getpage(inode, pgoff, &page, SGP_NOALLOC);
- /* Our caller expects us to return -EFAULT if we failed to find page. */
+ ret = shmem_get_folio(inode, pgoff, &folio, SGP_NOALLOC);
+ /* Our caller expects us to return -EFAULT if we failed to find the folio */
if (ret == -ENOENT)
ret = -EFAULT;
if (ret)
goto out;
- if (!page) {
+ if (!folio) {
ret = -EFAULT;
goto out;
}
+ page = folio_file_page(folio, pgoff);
if (PageHWPoison(page)) {
ret = -EIO;
goto out_release;
@@ -267,13 +269,13 @@ static int mcontinue_atomic_pte(struct mm_struct *dst_mm,
if (ret)
goto out_release;
- unlock_page(page);
+ folio_unlock(folio);
ret = 0;
out:
return ret;
out_release:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
goto out;
}
@@ -377,30 +379,30 @@ retry:
BUG_ON(dst_addr >= dst_start + len);
/*
- * Serialize via i_mmap_rwsem and hugetlb_fault_mutex.
- * i_mmap_rwsem ensures the dst_pte remains valid even
+ * Serialize via vma_lock and hugetlb_fault_mutex.
+ * vma_lock ensures the dst_pte remains valid even
* in the case of shared pmds. fault mutex prevents
* races with other faulting threads.
*/
- mapping = dst_vma->vm_file->f_mapping;
- i_mmap_lock_read(mapping);
idx = linear_page_index(dst_vma, dst_addr);
+ mapping = dst_vma->vm_file->f_mapping;
hash = hugetlb_fault_mutex_hash(mapping, idx);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
+ hugetlb_vma_lock_read(dst_vma);
err = -ENOMEM;
dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
if (!dst_pte) {
+ hugetlb_vma_unlock_read(dst_vma);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
goto out_unlock;
}
if (mode != MCOPY_ATOMIC_CONTINUE &&
!huge_pte_none_mostly(huge_ptep_get(dst_pte))) {
err = -EEXIST;
+ hugetlb_vma_unlock_read(dst_vma);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
goto out_unlock;
}
@@ -408,8 +410,8 @@ retry:
dst_addr, src_addr, mode, &page,
wp_copy);
+ hugetlb_vma_unlock_read(dst_vma);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
- i_mmap_unlock_read(mapping);
cond_resched();
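
The userfaultfd hugetlb hunks swap i_mmap_rwsem for the new hugetlb per-VMA lock, taken inside the fault mutex. The skeleton below only illustrates the resulting lock order (not part of the patch; example_* is a made-up name and the copy itself is elided):

/* Illustrative sketch, not part of the patch: lock ordering established
 * by the hunks above for the hugetlb mcopy path.
 */
static void example_hugetlb_lock_order(struct vm_area_struct *dst_vma,
				       struct address_space *mapping,
				       unsigned long dst_addr)
{
	pgoff_t idx = linear_page_index(dst_vma, dst_addr);
	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);

	mutex_lock(&hugetlb_fault_mutex_table[hash]);	/* 1: fault mutex */
	hugetlb_vma_lock_read(dst_vma);			/* 2: per-VMA lock */

	/* ... huge_pte_alloc(), huge_ptep_get() checks, the copy itself ... */

	hugetlb_vma_unlock_read(dst_vma);		/* release in reverse order */
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}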
diff --git a/mm/util.c b/mm/util.c
index 346e40177bc6..12984e76767e 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -272,38 +272,6 @@ void *memdup_user_nul(const void __user *src, size_t len)
}
EXPORT_SYMBOL(memdup_user_nul);
-void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev)
-{
- struct vm_area_struct *next;
-
- vma->vm_prev = prev;
- if (prev) {
- next = prev->vm_next;
- prev->vm_next = vma;
- } else {
- next = mm->mmap;
- mm->mmap = vma;
- }
- vma->vm_next = next;
- if (next)
- next->vm_prev = vma;
-}
-
-void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
-{
- struct vm_area_struct *prev, *next;
-
- next = vma->vm_next;
- prev = vma->vm_prev;
- if (prev)
- prev->vm_next = next;
- else
- mm->mmap = next;
- if (next)
- next->vm_prev = prev;
-}
-
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
@@ -854,10 +822,10 @@ int folio_mapcount(struct folio *folio)
return atomic_read(&folio->_mapcount) + 1;
compound = folio_entire_mapcount(folio);
- nr = folio_nr_pages(folio);
if (folio_test_hugetlb(folio))
return compound;
ret = compound;
+ nr = folio_nr_pages(folio);
for (i = 0; i < nr; i++)
ret += atomic_read(&folio_page(folio, i)->_mapcount) + 1;
/* File pages has compound_mapcount included in _mapcount */
@@ -1056,6 +1024,8 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
if (percpu_counter_read_positive(&vm_committed_as) < allowed)
return 0;
error:
+ pr_warn_ratelimited("%s: pid: %d, comm: %s, not enough memory for the allocation\n",
+ __func__, current->pid, current->comm);
vm_unacct_memory(pages);
return -ENOMEM;
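
__vma_link_list() and __vma_unlink_list() have no remaining users once the vm_next/vm_prev chain is gone; VMA walkers use the maple-tree VMA iterator instead, as the unuse_mm() hunk in mm/swapfile.c above already does. A minimal sketch (illustrative only, not part of the patch; example_* is a made-up name):

/* Illustrative sketch, not part of the patch: walking all VMAs without
 * vm_next, using the VMA iterator shown in the unuse_mm() hunk above.
 */
static void example_walk_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		/* ... inspect vma->vm_start / vma->vm_end ... */
		cond_resched();
	}
	mmap_read_unlock(mm);
}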
diff --git a/mm/vmacache.c b/mm/vmacache.c
deleted file mode 100644
index 01a6e6688ec1..000000000000
--- a/mm/vmacache.c
+++ /dev/null
@@ -1,117 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2014 Davidlohr Bueso.
- */
-#include <linux/sched/signal.h>
-#include <linux/sched/task.h>
-#include <linux/mm.h>
-#include <linux/vmacache.h>
-
-/*
- * Hash based on the pmd of addr if configured with MMU, which provides a good
- * hit rate for workloads with spatial locality. Otherwise, use pages.
- */
-#ifdef CONFIG_MMU
-#define VMACACHE_SHIFT PMD_SHIFT
-#else
-#define VMACACHE_SHIFT PAGE_SHIFT
-#endif
-#define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)
-
-/*
- * This task may be accessing a foreign mm via (for example)
- * get_user_pages()->find_vma(). The vmacache is task-local and this
- * task's vmacache pertains to a different mm (ie, its own). There is
- * nothing we can do here.
- *
- * Also handle the case where a kernel thread has adopted this mm via
- * kthread_use_mm(). That kernel thread's vmacache is not applicable to this mm.
- */
-static inline bool vmacache_valid_mm(struct mm_struct *mm)
-{
- return current->mm == mm && !(current->flags & PF_KTHREAD);
-}
-
-void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)
-{
- if (vmacache_valid_mm(newvma->vm_mm))
- current->vmacache.vmas[VMACACHE_HASH(addr)] = newvma;
-}
-
-static bool vmacache_valid(struct mm_struct *mm)
-{
- struct task_struct *curr;
-
- if (!vmacache_valid_mm(mm))
- return false;
-
- curr = current;
- if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
- /*
- * First attempt will always be invalid, initialize
- * the new cache for this task here.
- */
- curr->vmacache.seqnum = mm->vmacache_seqnum;
- vmacache_flush(curr);
- return false;
- }
- return true;
-}
-
-struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
-{
- int idx = VMACACHE_HASH(addr);
- int i;
-
- count_vm_vmacache_event(VMACACHE_FIND_CALLS);
-
- if (!vmacache_valid(mm))
- return NULL;
-
- for (i = 0; i < VMACACHE_SIZE; i++) {
- struct vm_area_struct *vma = current->vmacache.vmas[idx];
-
- if (vma) {
-#ifdef CONFIG_DEBUG_VM_VMACACHE
- if (WARN_ON_ONCE(vma->vm_mm != mm))
- break;
-#endif
- if (vma->vm_start <= addr && vma->vm_end > addr) {
- count_vm_vmacache_event(VMACACHE_FIND_HITS);
- return vma;
- }
- }
- if (++idx == VMACACHE_SIZE)
- idx = 0;
- }
-
- return NULL;
-}
-
-#ifndef CONFIG_MMU
-struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
-{
- int idx = VMACACHE_HASH(start);
- int i;
-
- count_vm_vmacache_event(VMACACHE_FIND_CALLS);
-
- if (!vmacache_valid(mm))
- return NULL;
-
- for (i = 0; i < VMACACHE_SIZE; i++) {
- struct vm_area_struct *vma = current->vmacache.vmas[idx];
-
- if (vma && vma->vm_start == start && vma->vm_end == end) {
- count_vm_vmacache_event(VMACACHE_FIND_HITS);
- return vma;
- }
- if (++idx == VMACACHE_SIZE)
- idx = 0;
- }
-
- return NULL;
-}
-#endif
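
With mm/vmacache.c deleted there is no per-task lookup cache left to update or invalidate; address lookups simply call find_vma() under mmap_lock and rely on the maple tree underneath. Sketch (illustrative, not part of the patch; example_* is a made-up name):

/* Illustrative sketch, not part of the patch: an address lookup after the
 * removal of the per-task vmacache.  There is nothing to consult or flush;
 * find_vma() does the tree walk itself.
 */
static bool example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped = false;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);	/* first VMA with vm_end > addr, or NULL */
	if (vma && vma->vm_start <= addr)
		mapped = true;
	mmap_read_unlock(mm);

	return mapped;
}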
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index dd6cdb201195..ccaa461998f3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -320,6 +320,9 @@ int ioremap_page_range(unsigned long addr, unsigned long end,
err = vmap_range_noflush(addr, end, phys_addr, pgprot_nx(prot),
ioremap_max_page_shift);
flush_cache_vmap(addr, end);
+ if (!err)
+ kmsan_ioremap_page_range(addr, end, phys_addr, prot,
+ ioremap_max_page_shift);
return err;
}
@@ -416,7 +419,7 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
*
* This is an internal function only. Do not use outside mm/.
*/
-void vunmap_range_noflush(unsigned long start, unsigned long end)
+void __vunmap_range_noflush(unsigned long start, unsigned long end)
{
unsigned long next;
pgd_t *pgd;
@@ -438,6 +441,12 @@ void vunmap_range_noflush(unsigned long start, unsigned long end)
arch_sync_kernel_mappings(start, end);
}
+void vunmap_range_noflush(unsigned long start, unsigned long end)
+{
+ kmsan_vunmap_range_noflush(start, end);
+ __vunmap_range_noflush(start, end);
+}
+
/**
* vunmap_range - unmap kernel virtual addresses
* @addr: start of the VM area to unmap
@@ -575,7 +584,7 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
*
* This is an internal function only. Do not use outside mm/.
*/
-int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+int __vmap_pages_range_noflush(unsigned long addr, unsigned long end,
pgprot_t prot, struct page **pages, unsigned int page_shift)
{
unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
@@ -590,7 +599,7 @@ int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
int err;
err = vmap_range_noflush(addr, addr + (1UL << page_shift),
- __pa(page_address(pages[i])), prot,
+ page_to_phys(pages[i]), prot,
page_shift);
if (err)
return err;
@@ -601,6 +610,13 @@ int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
return 0;
}
+int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
+ pgprot_t prot, struct page **pages, unsigned int page_shift)
+{
+ kmsan_vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+ return __vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
+}
+
/**
* vmap_pages_range - map pages to a kernel virtual address
* @addr: start of the VM area to map
@@ -1300,12 +1316,12 @@ find_vmap_lowest_match(struct rb_root *root, unsigned long size,
#include <linux/random.h>
static struct vmap_area *
-find_vmap_lowest_linear_match(unsigned long size,
+find_vmap_lowest_linear_match(struct list_head *head, unsigned long size,
unsigned long align, unsigned long vstart)
{
struct vmap_area *va;
- list_for_each_entry(va, &free_vmap_area_list, list) {
+ list_for_each_entry(va, head, list) {
if (!is_within_this_va(va, size, align, vstart))
continue;
@@ -1316,7 +1332,8 @@ find_vmap_lowest_linear_match(unsigned long size,
}
static void
-find_vmap_lowest_match_check(unsigned long size, unsigned long align)
+find_vmap_lowest_match_check(struct rb_root *root, struct list_head *head,
+ unsigned long size, unsigned long align)
{
struct vmap_area *va_1, *va_2;
unsigned long vstart;
@@ -1325,8 +1342,8 @@ find_vmap_lowest_match_check(unsigned long size, unsigned long align)
get_random_bytes(&rnd, sizeof(rnd));
vstart = VMALLOC_START + rnd;
- va_1 = find_vmap_lowest_match(size, align, vstart, false);
- va_2 = find_vmap_lowest_linear_match(size, align, vstart);
+ va_1 = find_vmap_lowest_match(root, size, align, vstart, false);
+ va_2 = find_vmap_lowest_linear_match(head, size, align, vstart);
if (va_1 != va_2)
pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
@@ -1513,7 +1530,7 @@ __alloc_vmap_area(struct rb_root *root, struct list_head *head,
return vend;
#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
- find_vmap_lowest_match_check(size, align);
+ find_vmap_lowest_match_check(root, head, size, align);
#endif
return nva_start_addr;
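
The vmalloc.c changes split the noflush map/unmap helpers into __-prefixed workers plus thin wrappers that also update KMSAN metadata, so existing callers keep the same names and signatures. An illustrative caller (not part of the patch; example_* is a made-up name):

/* Illustrative sketch, not part of the patch: mapping a page array into a
 * kernel VA range keeps using the unchanged wrapper, which now also feeds
 * KMSAN before performing the real mapping.
 */
static int example_map_pages(unsigned long addr, struct page **pages,
			     unsigned int nr)
{
	unsigned long end = addr + ((unsigned long)nr << PAGE_SHIFT);
	int err;

	err = vmap_pages_range_noflush(addr, end, PAGE_KERNEL, pages, PAGE_SHIFT);
	if (!err)
		flush_cache_vmap(addr, end);	/* _noflush leaves flushing to the caller */
	return err;
}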
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 382dbe97329f..04d8b88e5216 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -43,12 +43,17 @@
#include <linux/migrate.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
+#include <linux/memory-tiers.h>
#include <linux/oom.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
#include <linux/psi.h>
+#include <linux/pagewalk.h>
+#include <linux/shmem_fs.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -85,7 +90,7 @@ struct scan_control {
unsigned long anon_cost;
unsigned long file_cost;
- /* Can active pages be deactivated as part of reclaim? */
+ /* Can active folios be deactivated as part of reclaim? */
#define DEACTIVATE_ANON 1
#define DEACTIVATE_FILE 2
unsigned int may_deactivate:2;
@@ -95,10 +100,10 @@ struct scan_control {
/* Writepage batching in laptop mode; RECLAIM_WRITE */
unsigned int may_writepage:1;
- /* Can mapped pages be reclaimed? */
+ /* Can mapped folios be reclaimed? */
unsigned int may_unmap:1;
- /* Can pages be swapped as part of reclaim? */
+ /* Can folios be swapped as part of reclaim? */
unsigned int may_swap:1;
/* Proactive reclaim invoked by userspace through memory.reclaim */
@@ -123,19 +128,25 @@ struct scan_control {
/* There is easily reclaimable cold cache in the current node */
unsigned int cache_trim_mode:1;
- /* The file pages on the current node are dangerously low */
+ /* The file folios on the current node are dangerously low */
unsigned int file_is_tiny:1;
/* Always discard instead of demoting to lower tier memory */
unsigned int no_demotion:1;
+#ifdef CONFIG_LRU_GEN
+ /* help kswapd make better choices among multiple memcgs */
+ unsigned int memcgs_need_aging:1;
+ unsigned long last_reclaimed;
+#endif
+
/* Allocation order */
s8 order;
/* Scan (total_size >> priority) pages at once */
s8 priority;
- /* The highest zone to isolate pages for reclaim from */
+ /* The highest zone to isolate folios for reclaim from */
s8 reclaim_idx;
/* This context's GFP mask */
@@ -443,7 +454,7 @@ static bool cgroup_reclaim(struct scan_control *sc)
*
* The normal page dirty throttling mechanism in balance_dirty_pages() is
* completely broken with the legacy memcg and direct stalling in
- * shrink_page_list() is used for throttling instead, which lacks all the
+ * shrink_folio_list() is used for throttling instead, which lacks all the
* niceties such as fairness, adaptive pausing, bandwidth proportional
* allocation and configurability.
*
@@ -564,9 +575,9 @@ static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
}
/*
- * This misses isolated pages which are not accounted for to save counters.
+ * This misses isolated folios which are not accounted for to save counters.
* As the data only determines if reclaim or compaction continues, it is
- * not expected that isolated pages will be a dominating factor.
+ * not expected that isolated folios will be a dominating factor.
*/
unsigned long zone_reclaimable_pages(struct zone *zone)
{
@@ -1039,9 +1050,9 @@ void drop_slab(void)
static inline int is_page_cache_freeable(struct folio *folio)
{
/*
- * A freeable page cache page is referenced only by the caller
- * that isolated the page, the page cache and optional buffer
- * heads at page->private.
+ * A freeable page cache folio is referenced only by the caller
+ * that isolated the folio, the page cache and optional filesystem
+ * private data at folio->private.
*/
return folio_ref_count(folio) - folio_test_private(folio) ==
1 + folio_nr_pages(folio);
@@ -1081,8 +1092,8 @@ static bool skip_throttle_noprogress(pg_data_t *pgdat)
return true;
/*
- * If there are a lot of dirty/writeback pages then do not
- * throttle as throttling will occur when the pages cycle
+ * If there are a lot of dirty/writeback folios then do not
+ * throttle as throttling will occur when the folios cycle
* towards the end of the LRU if still under writeback.
*/
for (i = 0; i < MAX_NR_ZONES; i++) {
@@ -1125,7 +1136,7 @@ void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
* short. Failing to make progress or waiting on writeback are
* potentially long-lived events so use a longer timeout. This is shaky
* logic as a failure to make progress could be due to anything from
- * writeback to a slow device to excessive references pages at the tail
+ * writeback to a slow device to excessive referenced folios at the tail
* of the inactive LRU.
*/
switch(reason) {
@@ -1171,8 +1182,8 @@ void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
}
/*
- * Account for pages written if tasks are throttled waiting on dirty
- * pages to clean. If enough pages have been cleaned since throttling
+ * Account for folios written if tasks are throttled waiting on dirty
+ * folios to clean. If enough folios have been cleaned since throttling
* started then wakeup the throttled tasks.
*/
void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
@@ -1198,18 +1209,18 @@ void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
/* possible outcome of pageout() */
typedef enum {
- /* failed to write page out, page is locked */
+ /* failed to write folio out, folio is locked */
PAGE_KEEP,
- /* move page to the active list, page is locked */
+ /* move folio to the active list, folio is locked */
PAGE_ACTIVATE,
- /* page has been sent to the disk successfully, page is unlocked */
+ /* folio has been sent to the disk successfully, folio is unlocked */
PAGE_SUCCESS,
- /* page is clean and locked */
+ /* folio is clean and locked */
PAGE_CLEAN,
} pageout_t;
/*
- * pageout is called by shrink_page_list() for each dirty page.
+ * pageout is called by shrink_folio_list() for each dirty folio.
* Calls ->writepage().
*/
static pageout_t pageout(struct folio *folio, struct address_space *mapping,
@@ -1283,7 +1294,7 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
}
/*
- * Same as remove_mapping, but if the page is removed from the mapping, it
+ * Same as remove_mapping, but if the folio is removed from the mapping, it
* gets returned with a refcount of 0.
*/
static int __remove_mapping(struct address_space *mapping, struct folio *folio,
@@ -1299,34 +1310,34 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
spin_lock(&mapping->host->i_lock);
xa_lock_irq(&mapping->i_pages);
/*
- * The non racy check for a busy page.
+ * The non racy check for a busy folio.
*
* Must be careful with the order of the tests. When someone has
- * a ref to the page, it may be possible that they dirty it then
- * drop the reference. So if PageDirty is tested before page_count
- * here, then the following race may occur:
+ * a ref to the folio, it may be possible that they dirty it then
+ * drop the reference. So if the dirty flag is tested before the
+ * refcount here, then the following race may occur:
*
* get_user_pages(&page);
* [user mapping goes away]
* write_to(page);
- * !PageDirty(page) [good]
- * SetPageDirty(page);
- * put_page(page);
- * !page_count(page) [good, discard it]
+ * !folio_test_dirty(folio) [good]
+ * folio_set_dirty(folio);
+ * folio_put(folio);
+ * !refcount(folio) [good, discard it]
*
* [oops, our write_to data is lost]
*
* Reversing the order of the tests ensures such a situation cannot
- * escape unnoticed. The smp_rmb is needed to ensure the page->flags
- * load is not satisfied before that of page->_refcount.
+ * escape unnoticed. The smp_rmb is needed to ensure the folio->flags
+ * load is not satisfied before that of folio->_refcount.
*
- * Note that if SetPageDirty is always performed via set_page_dirty,
+ * Note that if the dirty flag is always set via folio_mark_dirty,
* and thus under the i_pages lock, then this ordering is not required.
*/
refcount = 1 + folio_nr_pages(folio);
if (!folio_ref_freeze(folio, refcount))
goto cannot_free;
- /* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
+ /* note: atomic_cmpxchg in folio_ref_freeze provides the smp_rmb */
if (unlikely(folio_test_dirty(folio))) {
folio_ref_unfreeze(folio, refcount);
goto cannot_free;
@@ -1334,12 +1345,14 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
if (folio_test_swapcache(folio)) {
swp_entry_t swap = folio_swap_entry(folio);
- mem_cgroup_swapout(folio, swap);
+
+ /* get a shadow entry before mem_cgroup_swapout() clears folio_memcg() */
if (reclaimed && !mapping_exiting(mapping))
shadow = workingset_eviction(folio, target_memcg);
+ mem_cgroup_swapout(folio, swap);
__delete_from_swap_cache(folio, swap, shadow);
xa_unlock_irq(&mapping->i_pages);
- put_swap_page(&folio->page, swap);
+ put_swap_folio(folio, swap);
} else {
void (*free_folio)(struct folio *);
@@ -1355,7 +1368,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
* back.
*
* We also don't store shadows for DAX mappings because the
- * only page cache pages found in these are zero pages
+ * only page cache folios found in these are zero pages
* covering holes, and because we don't want to mix DAX
* exceptional entries and shadow exceptional entries in the
* same address_space.
@@ -1423,14 +1436,14 @@ void folio_putback_lru(struct folio *folio)
folio_put(folio); /* drop ref from isolate */
}
-enum page_references {
- PAGEREF_RECLAIM,
- PAGEREF_RECLAIM_CLEAN,
- PAGEREF_KEEP,
- PAGEREF_ACTIVATE,
+enum folio_references {
+ FOLIOREF_RECLAIM,
+ FOLIOREF_RECLAIM_CLEAN,
+ FOLIOREF_KEEP,
+ FOLIOREF_ACTIVATE,
};
-static enum page_references folio_check_references(struct folio *folio,
+static enum folio_references folio_check_references(struct folio *folio,
struct scan_control *sc)
{
int referenced_ptes, referenced_folio;
@@ -1445,11 +1458,11 @@ static enum page_references folio_check_references(struct folio *folio,
* Let the folio, now marked Mlocked, be moved to the unevictable list.
*/
if (vm_flags & VM_LOCKED)
- return PAGEREF_ACTIVATE;
+ return FOLIOREF_ACTIVATE;
/* rmap lock contention: rotate */
if (referenced_ptes == -1)
- return PAGEREF_KEEP;
+ return FOLIOREF_KEEP;
if (referenced_ptes) {
/*
@@ -1469,34 +1482,34 @@ static enum page_references folio_check_references(struct folio *folio,
folio_set_referenced(folio);
if (referenced_folio || referenced_ptes > 1)
- return PAGEREF_ACTIVATE;
+ return FOLIOREF_ACTIVATE;
/*
* Activate file-backed executable folios after first usage.
*/
if ((vm_flags & VM_EXEC) && folio_is_file_lru(folio))
- return PAGEREF_ACTIVATE;
+ return FOLIOREF_ACTIVATE;
- return PAGEREF_KEEP;
+ return FOLIOREF_KEEP;
}
/* Reclaim if clean, defer dirty folios to writeback */
if (referenced_folio && folio_is_file_lru(folio))
- return PAGEREF_RECLAIM_CLEAN;
+ return FOLIOREF_RECLAIM_CLEAN;
- return PAGEREF_RECLAIM;
+ return FOLIOREF_RECLAIM;
}
-/* Check if a page is dirty or under writeback */
+/* Check if a folio is dirty or under writeback */
static void folio_check_dirty_writeback(struct folio *folio,
bool *dirty, bool *writeback)
{
struct address_space *mapping;
/*
- * Anonymous pages are not handled by flushers and must be written
+ * Anonymous folios are not handled by flushers and must be written
* from reclaim context. Do not stall reclaim based on them.
- * MADV_FREE anonymous pages are put into inactive file list too.
+ * MADV_FREE anonymous folios are put into inactive file list too.
* They could be mistakenly treated as file lru. So further anon
* test is needed.
*/
@@ -1520,44 +1533,71 @@ static void folio_check_dirty_writeback(struct folio *folio,
mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
}
-static struct page *alloc_demote_page(struct page *page, unsigned long node)
+static struct page *alloc_demote_page(struct page *page, unsigned long private)
{
- struct migration_target_control mtc = {
- /*
- * Allocate from 'node', or fail quickly and quietly.
- * When this happens, 'page' will likely just be discarded
- * instead of migrated.
- */
- .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
- __GFP_THISNODE | __GFP_NOWARN |
- __GFP_NOMEMALLOC | GFP_NOWAIT,
- .nid = node
- };
+ struct page *target_page;
+ nodemask_t *allowed_mask;
+ struct migration_target_control *mtc;
+
+ mtc = (struct migration_target_control *)private;
+
+ allowed_mask = mtc->nmask;
+ /*
+ * Make sure we allocate from the target node first, also trying to
+ * demote or reclaim pages from the target node via kswapd if we are
+ * low on free memory on the target node. If we skipped this and there
+ * were free memory on a slower (lower) memory tier, we would start
+ * allocating from the slower tiers without even forcing a demotion of
+ * cold pages from the target memtier, which can result in the kernel
+ * placing hot pages in slower (lower) memory tiers.
+ */
+ mtc->nmask = NULL;
+ mtc->gfp_mask |= __GFP_THISNODE;
+ target_page = alloc_migration_target(page, (unsigned long)mtc);
+ if (target_page)
+ return target_page;
+
+ mtc->gfp_mask &= ~__GFP_THISNODE;
+ mtc->nmask = allowed_mask;
- return alloc_migration_target(page, (unsigned long)&mtc);
+ return alloc_migration_target(page, (unsigned long)mtc);
}
/*
- * Take pages on @demote_list and attempt to demote them to
- * another node. Pages which are not demoted are left on
- * @demote_pages.
+ * Take folios on @demote_folios and attempt to demote them to another node.
+ * Folios which are not demoted are left on @demote_folios.
*/
-static unsigned int demote_page_list(struct list_head *demote_pages,
+static unsigned int demote_folio_list(struct list_head *demote_folios,
struct pglist_data *pgdat)
{
int target_nid = next_demotion_node(pgdat->node_id);
unsigned int nr_succeeded;
+ nodemask_t allowed_mask;
+
+ struct migration_target_control mtc = {
+ /*
+ * Allocate from 'node', or fail quickly and quietly.
+ * When this happens, 'page' will likely just be discarded
+ * instead of migrated.
+ */
+ .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
+ __GFP_NOMEMALLOC | GFP_NOWAIT,
+ .nid = target_nid,
+ .nmask = &allowed_mask
+ };
- if (list_empty(demote_pages))
+ if (list_empty(demote_folios))
return 0;
if (target_nid == NUMA_NO_NODE)
return 0;
+ node_get_allowed_targets(pgdat, &allowed_mask);
+
/* Demotion ignores all cpuset and mempolicy settings */
- migrate_pages(demote_pages, alloc_demote_page, NULL,
- target_nid, MIGRATE_ASYNC, MR_DEMOTION,
- &nr_succeeded);
+ migrate_pages(demote_folios, alloc_demote_page, NULL,
+ (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
+ &nr_succeeded);
if (current_is_kswapd())
__count_vm_events(PGDEMOTE_KSWAPD, nr_succeeded);
@@ -1584,17 +1624,15 @@ static bool may_enter_fs(struct folio *folio, gfp_t gfp_mask)
}
/*
- * shrink_page_list() returns the number of reclaimed pages
+ * shrink_folio_list() returns the number of reclaimed pages
*/
-static unsigned int shrink_page_list(struct list_head *page_list,
- struct pglist_data *pgdat,
- struct scan_control *sc,
- struct reclaim_stat *stat,
- bool ignore_references)
-{
- LIST_HEAD(ret_pages);
- LIST_HEAD(free_pages);
- LIST_HEAD(demote_pages);
+static unsigned int shrink_folio_list(struct list_head *folio_list,
+ struct pglist_data *pgdat, struct scan_control *sc,
+ struct reclaim_stat *stat, bool ignore_references)
+{
+ LIST_HEAD(ret_folios);
+ LIST_HEAD(free_folios);
+ LIST_HEAD(demote_folios);
unsigned int nr_reclaimed = 0;
unsigned int pgactivate = 0;
bool do_demote_pass;
@@ -1605,16 +1643,16 @@ static unsigned int shrink_page_list(struct list_head *page_list,
do_demote_pass = can_demote(pgdat->node_id, sc);
retry:
- while (!list_empty(page_list)) {
+ while (!list_empty(folio_list)) {
struct address_space *mapping;
struct folio *folio;
- enum page_references references = PAGEREF_RECLAIM;
+ enum folio_references references = FOLIOREF_RECLAIM;
bool dirty, writeback;
unsigned int nr_pages;
cond_resched();
- folio = lru_to_folio(page_list);
+ folio = lru_to_folio(folio_list);
list_del(&folio->lru);
if (!folio_trylock(folio))
@@ -1633,6 +1671,11 @@ retry:
if (!sc->may_unmap && folio_mapped(folio))
goto keep_locked;
+ /* folio_update_gen() tried to promote this page? */
+ if (lru_gen_enabled() && !ignore_references &&
+ folio_mapped(folio) && folio_test_referenced(folio))
+ goto keep_locked;
+
/*
* The number of dirty pages determines if a node is marked
* reclaim_congested. kswapd will stall and start writing
@@ -1733,7 +1776,7 @@ retry:
folio_unlock(folio);
folio_wait_writeback(folio);
/* then go back and try same folio again */
- list_add_tail(&folio->lru, page_list);
+ list_add_tail(&folio->lru, folio_list);
continue;
}
}
@@ -1742,13 +1785,13 @@ retry:
references = folio_check_references(folio, sc);
switch (references) {
- case PAGEREF_ACTIVATE:
+ case FOLIOREF_ACTIVATE:
goto activate_locked;
- case PAGEREF_KEEP:
+ case FOLIOREF_KEEP:
stat->nr_ref_keep += nr_pages;
goto keep_locked;
- case PAGEREF_RECLAIM:
- case PAGEREF_RECLAIM_CLEAN:
+ case FOLIOREF_RECLAIM:
+ case FOLIOREF_RECLAIM_CLEAN:
; /* try to reclaim the folio below */
}
@@ -1758,7 +1801,7 @@ retry:
*/
if (do_demote_pass &&
(thp_migration_supported() || !folio_test_large(folio))) {
- list_add(&folio->lru, &demote_pages);
+ list_add(&folio->lru, &demote_folios);
folio_unlock(folio);
continue;
}
@@ -1785,7 +1828,7 @@ retry:
*/
if (!folio_entire_mapcount(folio) &&
split_folio_to_list(folio,
- page_list))
+ folio_list))
goto activate_locked;
}
if (!add_to_swap(folio)) {
@@ -1793,7 +1836,7 @@ retry:
goto activate_locked_split;
/* Fallback to swap normal pages */
if (split_folio_to_list(folio,
- page_list))
+ folio_list))
goto activate_locked;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
count_vm_event(THP_SWPOUT_FALLBACK);
@@ -1805,7 +1848,7 @@ retry:
} else if (folio_test_swapbacked(folio) &&
folio_test_large(folio)) {
/* Split shmem folio */
- if (split_folio_to_list(folio, page_list))
+ if (split_folio_to_list(folio, folio_list))
goto keep_locked;
}
@@ -1870,7 +1913,7 @@ retry:
goto activate_locked;
}
- if (references == PAGEREF_RECLAIM_CLEAN)
+ if (references == FOLIOREF_RECLAIM_CLEAN)
goto keep_locked;
if (!may_enter_fs(folio, sc->gfp_mask))
goto keep_locked;
@@ -1983,13 +2026,13 @@ free_it:
nr_reclaimed += nr_pages;
/*
- * Is there need to periodically free_page_list? It would
+ * Is there need to periodically free_folio_list? It would
* appear not as the counts should be low
*/
if (unlikely(folio_test_large(folio)))
destroy_large_folio(folio);
else
- list_add(&folio->lru, &free_pages);
+ list_add(&folio->lru, &free_folios);
continue;
activate_locked_split:
@@ -2004,9 +2047,8 @@ activate_locked_split:
activate_locked:
/* Not a candidate for swapping, so reclaim swap space. */
if (folio_test_swapcache(folio) &&
- (mem_cgroup_swap_full(&folio->page) ||
- folio_test_mlocked(folio)))
- try_to_free_swap(&folio->page);
+ (mem_cgroup_swap_full(folio) || folio_test_mlocked(folio)))
+ folio_free_swap(folio);
VM_BUG_ON_FOLIO(folio_test_active(folio), folio);
if (!folio_test_mlocked(folio)) {
int type = folio_is_file_lru(folio);
@@ -2017,29 +2059,29 @@ activate_locked:
keep_locked:
folio_unlock(folio);
keep:
- list_add(&folio->lru, &ret_pages);
+ list_add(&folio->lru, &ret_folios);
VM_BUG_ON_FOLIO(folio_test_lru(folio) ||
folio_test_unevictable(folio), folio);
}
- /* 'page_list' is always empty here */
+ /* 'folio_list' is always empty here */
/* Migrate folios selected for demotion */
- nr_reclaimed += demote_page_list(&demote_pages, pgdat);
- /* Folios that could not be demoted are still in @demote_pages */
- if (!list_empty(&demote_pages)) {
- /* Folios which weren't demoted go back on @page_list for retry: */
- list_splice_init(&demote_pages, page_list);
+ nr_reclaimed += demote_folio_list(&demote_folios, pgdat);
+ /* Folios that could not be demoted are still in @demote_folios */
+ if (!list_empty(&demote_folios)) {
+ /* Folios which weren't demoted go back on @folio_list for retry: */
+ list_splice_init(&demote_folios, folio_list);
do_demote_pass = false;
goto retry;
}
pgactivate = stat->nr_activate[0] + stat->nr_activate[1];
- mem_cgroup_uncharge_list(&free_pages);
+ mem_cgroup_uncharge_list(&free_folios);
try_to_unmap_flush();
- free_unref_page_list(&free_pages);
+ free_unref_page_list(&free_folios);
- list_splice(&ret_pages, page_list);
+ list_splice(&ret_folios, folio_list);
count_vm_events(PGACTIVATE, pgactivate);
if (plug)
@@ -2048,7 +2090,7 @@ keep:
}
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
- struct list_head *folio_list)
+ struct list_head *folio_list)
{
struct scan_control sc = {
.gfp_mask = GFP_KERNEL,
@@ -2076,7 +2118,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
* change in the future.
*/
noreclaim_flag = memalloc_noreclaim_save();
- nr_reclaimed = shrink_page_list(&clean_folios, zone->zone_pgdat, &sc,
+ nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc,
&stat, true);
memalloc_noreclaim_restore(noreclaim_flag);
@@ -2135,7 +2177,7 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
*
* returns how many pages were moved onto *@dst.
*/
-static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
+static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
struct lruvec *lruvec, struct list_head *dst,
unsigned long *nr_scanned, struct scan_control *sc,
enum lru_list lru)
@@ -2242,8 +2284,8 @@ move:
*
* Context:
*
- * (1) Must be called with an elevated refcount on the page. This is a
- * fundamental difference from isolate_lru_pages() (which is called
+ * (1) Must be called with an elevated refcount on the folio. This is a
+ * fundamental difference from isolate_lru_folios() (which is called
* without a stable reference).
* (2) The lru_lock must not be held.
* (3) Interrupts must be enabled.
@@ -2315,13 +2357,13 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
}
/*
- * move_pages_to_lru() moves folios from private @list to appropriate LRU list.
+ * move_folios_to_lru() moves folios from private @list to appropriate LRU list.
* On return, @list is reused as a list of folios to be freed by the caller.
*
* Returns the number of pages moved to the given lruvec.
*/
-static unsigned int move_pages_to_lru(struct lruvec *lruvec,
- struct list_head *list)
+static unsigned int move_folios_to_lru(struct lruvec *lruvec,
+ struct list_head *list)
{
int nr_pages, nr_moved = 0;
LIST_HEAD(folios_to_free);
@@ -2341,7 +2383,7 @@ static unsigned int move_pages_to_lru(struct lruvec *lruvec,
/*
* The folio_set_lru needs to be kept here for list integrity.
* Otherwise:
- * #0 move_pages_to_lru #1 release_pages
+ * #0 move_folios_to_lru #1 release_pages
* if (!folio_put_testzero())
* if (folio_put_testzero())
* !lru //skip lru_lock
@@ -2398,11 +2440,11 @@ static int current_may_throttle(void)
* shrink_inactive_list() is a helper for shrink_node(). It returns the number
* of reclaimed pages
*/
-static unsigned long
-shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
- struct scan_control *sc, enum lru_list lru)
+static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
+ struct lruvec *lruvec, struct scan_control *sc,
+ enum lru_list lru)
{
- LIST_HEAD(page_list);
+ LIST_HEAD(folio_list);
unsigned long nr_scanned;
unsigned int nr_reclaimed = 0;
unsigned long nr_taken;
@@ -2429,7 +2471,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
spin_lock_irq(&lruvec->lru_lock);
- nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
+ nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &folio_list,
&nr_scanned, sc, lru);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
@@ -2444,10 +2486,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
if (nr_taken == 0)
return 0;
- nr_reclaimed = shrink_page_list(&page_list, pgdat, sc, &stat, false);
+ nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false);
spin_lock_irq(&lruvec->lru_lock);
- move_pages_to_lru(lruvec, &page_list);
+ move_folios_to_lru(lruvec, &folio_list);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
@@ -2458,16 +2500,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
spin_unlock_irq(&lruvec->lru_lock);
lru_note_cost(lruvec, file, stat.nr_pageout);
- mem_cgroup_uncharge_list(&page_list);
- free_unref_page_list(&page_list);
+ mem_cgroup_uncharge_list(&folio_list);
+ free_unref_page_list(&folio_list);
/*
- * If dirty pages are scanned that are not queued for IO, it
+ * If dirty folios are scanned that are not queued for IO, it
* implies that flushers are not doing their job. This can
- * happen when memory pressure pushes dirty pages to the end of
+ * happen when memory pressure pushes dirty folios to the end of
* the LRU before the dirty limits are breached and the dirty
* data has expired. It can also happen when the proportion of
- * dirty pages grows not through writes but through memory
+ * dirty folios grows not through writes but through memory
* pressure reclaiming all the clean cache. And in some cases,
* the flushers simply cannot keep up with the allocation
* rate. Nudge the flusher threads in case they are asleep.
@@ -2526,7 +2568,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
spin_lock_irq(&lruvec->lru_lock);
- nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
+ nr_taken = isolate_lru_folios(nr_to_scan, lruvec, &l_hold,
&nr_scanned, sc, lru);
__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
@@ -2586,8 +2628,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
*/
spin_lock_irq(&lruvec->lru_lock);
- nr_activate = move_pages_to_lru(lruvec, &l_active);
- nr_deactivate = move_pages_to_lru(lruvec, &l_inactive);
+ nr_activate = move_folios_to_lru(lruvec, &l_active);
+ nr_deactivate = move_folios_to_lru(lruvec, &l_inactive);
/* Keep all free folios in l_active list */
list_splice(&l_inactive, &l_active);
@@ -2603,7 +2645,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
nr_deactivate, nr_rotated, sc->priority, file);
}
-static unsigned int reclaim_page_list(struct list_head *page_list,
+static unsigned int reclaim_folio_list(struct list_head *folio_list,
struct pglist_data *pgdat)
{
struct reclaim_stat dummy_stat;
@@ -2617,9 +2659,9 @@ static unsigned int reclaim_page_list(struct list_head *page_list,
.no_demotion = 1,
};
- nr_reclaimed = shrink_page_list(page_list, pgdat, &sc, &dummy_stat, false);
- while (!list_empty(page_list)) {
- folio = lru_to_folio(page_list);
+ nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, false);
+ while (!list_empty(folio_list)) {
+ folio = lru_to_folio(folio_list);
list_del(&folio->lru);
folio_putback_lru(folio);
}
@@ -2649,11 +2691,11 @@ unsigned long reclaim_pages(struct list_head *folio_list)
continue;
}
- nr_reclaimed += reclaim_page_list(&node_folio_list, NODE_DATA(nid));
+ nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
nid = folio_nid(lru_to_folio(folio_list));
} while (!list_empty(folio_list));
- nr_reclaimed += reclaim_page_list(&node_folio_list, NODE_DATA(nid));
+ nr_reclaimed += reclaim_folio_list(&node_folio_list, NODE_DATA(nid));
memalloc_noreclaim_restore(noreclaim_flag);
@@ -2683,13 +2725,13 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
* but large enough to avoid thrashing the aggregate readahead window.
*
* Both inactive lists should also be large enough that each inactive
- * page has a chance to be referenced again before it is reclaimed.
+ * folio has a chance to be referenced again before it is reclaimed.
*
* If that fails and refaulting is observed, the inactive list grows.
*
- * The inactive_ratio is the target ratio of ACTIVE to INACTIVE pages
+ * The inactive_ratio is the target ratio of ACTIVE to INACTIVE folios
* on this LRU, maintained by the pageout code. An inactive_ratio
- * of 3 means 3:1 or 25% of the pages are kept on the inactive list.
+ * of 3 means 3:1 or 25% of the folios are kept on the inactive list.
*
* total target max
* memory ratio inactive
@@ -2728,12 +2770,118 @@ enum scan_balance {
SCAN_FILE,
};
+static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
+{
+ unsigned long file;
+ struct lruvec *target_lruvec;
+
+ if (lru_gen_enabled())
+ return;
+
+ target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
+
+ /*
+ * Flush the memory cgroup stats, so that we read accurate per-memcg
+ * lruvec stats for heuristics.
+ */
+ mem_cgroup_flush_stats();
+
+ /*
+ * Determine the scan balance between anon and file LRUs.
+ */
+ spin_lock_irq(&target_lruvec->lru_lock);
+ sc->anon_cost = target_lruvec->anon_cost;
+ sc->file_cost = target_lruvec->file_cost;
+ spin_unlock_irq(&target_lruvec->lru_lock);
+
+ /*
+ * Target desirable inactive:active list ratios for the anon
+ * and file LRU lists.
+ */
+ if (!sc->force_deactivate) {
+ unsigned long refaults;
+
+ /*
+ * When refaults are being observed, it means a new
+ * workingset is being established. Deactivate to get
+ * rid of any stale active pages quickly.
+ */
+ refaults = lruvec_page_state(target_lruvec,
+ WORKINGSET_ACTIVATE_ANON);
+ if (refaults != target_lruvec->refaults[WORKINGSET_ANON] ||
+ inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
+ sc->may_deactivate |= DEACTIVATE_ANON;
+ else
+ sc->may_deactivate &= ~DEACTIVATE_ANON;
+
+ refaults = lruvec_page_state(target_lruvec,
+ WORKINGSET_ACTIVATE_FILE);
+ if (refaults != target_lruvec->refaults[WORKINGSET_FILE] ||
+ inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
+ sc->may_deactivate |= DEACTIVATE_FILE;
+ else
+ sc->may_deactivate &= ~DEACTIVATE_FILE;
+ } else
+ sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
+
+ /*
+ * If we have plenty of inactive file pages that aren't
+ * thrashing, try to reclaim those first before touching
+ * anonymous pages.
+ */
+ file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
+ if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
+ sc->cache_trim_mode = 1;
+ else
+ sc->cache_trim_mode = 0;
+
+ /*
+ * Prevent the reclaimer from falling into the cache trap: as
+ * cache pages start out inactive, every cache fault will tip
+ * the scan balance towards the file LRU. And as the file LRU
+ * shrinks, so does the window for rotation from references.
+ * This means we have a runaway feedback loop where a tiny
+ * thrashing file LRU becomes infinitely more attractive than
+ * anon pages. Try to detect this based on file LRU size.
+ */
+ if (!cgroup_reclaim(sc)) {
+ unsigned long total_high_wmark = 0;
+ unsigned long free, anon;
+ int z;
+
+ free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
+ file = node_page_state(pgdat, NR_ACTIVE_FILE) +
+ node_page_state(pgdat, NR_INACTIVE_FILE);
+
+ for (z = 0; z < MAX_NR_ZONES; z++) {
+ struct zone *zone = &pgdat->node_zones[z];
+
+ if (!managed_zone(zone))
+ continue;
+
+ total_high_wmark += high_wmark_pages(zone);
+ }
+
+ /*
+ * Consider anon: if that's low too, this isn't a
+ * runaway file reclaim problem, but rather just
+ * extreme pressure. Reclaim as per usual then.
+ */
+ anon = node_page_state(pgdat, NR_INACTIVE_ANON);
+
+ sc->file_is_tiny =
+ file + free <= total_high_wmark &&
+ !(sc->may_deactivate & DEACTIVATE_ANON) &&
+ anon >> sc->priority;
+ }
+}
+
/*
* Determine how aggressively the anon and file LRU lists should be
* scanned.
*
- * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
- * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
+ * nr[0] = anon inactive folios to scan; nr[1] = anon active folios to scan
+ * nr[2] = file inactive folios to scan; nr[3] = file active folios to scan
*/
static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
unsigned long *nr)
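
prepare_scan_count() factors the classic anon/file balancing heuristics out of shrink_node() and becomes a no-op when the multi-gen LRU is enabled. Its call site is not in the hunks shown here, so the sketch below is only a hedged reconstruction of how it is expected to be invoked (example_* is a made-up name):

/* Illustrative sketch, not part of the patch: the expected call site,
 * presumably early in shrink_node(), which lies outside these hunks.
 */
static void example_shrink_node_prologue(pg_data_t *pgdat,
					 struct scan_control *sc)
{
	/* Returns immediately under an enabled multi-gen LRU; otherwise
	 * computes sc->may_deactivate, sc->cache_trim_mode and
	 * sc->file_is_tiny for the classic LRU path. */
	prepare_scan_count(pgdat, sc);
}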
@@ -2748,7 +2896,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
unsigned long ap, fp;
enum lru_list lru;
- /* If we have no swap space, do not bother scanning anon pages. */
+ /* If we have no swap space, do not bother scanning anon folios. */
if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
scan_balance = SCAN_FILE;
goto out;
@@ -2947,6 +3095,2747 @@ static bool can_age_anon_pages(struct pglist_data *pgdat,
return can_demote(pgdat->node_id, sc);
}
+#ifdef CONFIG_LRU_GEN
+
+#ifdef CONFIG_LRU_GEN_ENABLED
+DEFINE_STATIC_KEY_ARRAY_TRUE(lru_gen_caps, NR_LRU_GEN_CAPS);
+#define get_cap(cap) static_branch_likely(&lru_gen_caps[cap])
+#else
+DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS);
+#define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap])
+#endif
+
+/******************************************************************************
+ * shorthand helpers
+ ******************************************************************************/
+
+#define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset))
+
+#define DEFINE_MAX_SEQ(lruvec) \
+ unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
+
+#define DEFINE_MIN_SEQ(lruvec) \
+ unsigned long min_seq[ANON_AND_FILE] = { \
+ READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
+ READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
+ }
+
+#define for_each_gen_type_zone(gen, type, zone) \
+ for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \
+ for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \
+ for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
+
+static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid)
+{
+ struct pglist_data *pgdat = NODE_DATA(nid);
+
+#ifdef CONFIG_MEMCG
+ if (memcg) {
+ struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec;
+
+ /* for hotadd_new_pgdat() */
+ if (!lruvec->pgdat)
+ lruvec->pgdat = pgdat;
+
+ return lruvec;
+ }
+#endif
+ VM_WARN_ON_ONCE(!mem_cgroup_disabled());
+
+ return pgdat ? &pgdat->__lruvec : NULL;
+}
+
+static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
+{
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+
+ if (!can_demote(pgdat->node_id, sc) &&
+ mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH)
+ return 0;
+
+ return mem_cgroup_swappiness(memcg);
+}
+
+static int get_nr_gens(struct lruvec *lruvec, int type)
+{
+ return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1;
+}
+
+static bool __maybe_unused seq_is_valid(struct lruvec *lruvec)
+{
+ /* see the comment on lru_gen_struct */
+ return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS &&
+ get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) &&
+ get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS;
+}
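/*
 * A quick numeric illustration of the invariant checked above, assuming the
 * usual MIN_NR_GENS = 2 and MAX_NR_GENS = 4: with max_seq = 10,
 * min_seq[LRU_GEN_FILE] = 8 and min_seq[LRU_GEN_ANON] = 7, file spans
 * 10 - 8 + 1 = 3 generations and anon spans 10 - 7 + 1 = 4, so
 * 2 <= 3 <= 4 <= 4 holds and the sequence numbers are valid.
 */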
+
+/******************************************************************************
+ * mm_struct list
+ ******************************************************************************/
+
+static struct lru_gen_mm_list *get_mm_list(struct mem_cgroup *memcg)
+{
+ static struct lru_gen_mm_list mm_list = {
+ .fifo = LIST_HEAD_INIT(mm_list.fifo),
+ .lock = __SPIN_LOCK_UNLOCKED(mm_list.lock),
+ };
+
+#ifdef CONFIG_MEMCG
+ if (memcg)
+ return &memcg->mm_list;
+#endif
+ VM_WARN_ON_ONCE(!mem_cgroup_disabled());
+
+ return &mm_list;
+}
+
+void lru_gen_add_mm(struct mm_struct *mm)
+{
+ int nid;
+ struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
+ struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
+
+ VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list));
+#ifdef CONFIG_MEMCG
+ VM_WARN_ON_ONCE(mm->lru_gen.memcg);
+ mm->lru_gen.memcg = memcg;
+#endif
+ spin_lock(&mm_list->lock);
+
+ for_each_node_state(nid, N_MEMORY) {
+ struct lruvec *lruvec = get_lruvec(memcg, nid);
+
+ if (!lruvec)
+ continue;
+
+ /* the first addition since the last iteration */
+ if (lruvec->mm_state.tail == &mm_list->fifo)
+ lruvec->mm_state.tail = &mm->lru_gen.list;
+ }
+
+ list_add_tail(&mm->lru_gen.list, &mm_list->fifo);
+
+ spin_unlock(&mm_list->lock);
+}
+
+void lru_gen_del_mm(struct mm_struct *mm)
+{
+ int nid;
+ struct lru_gen_mm_list *mm_list;
+ struct mem_cgroup *memcg = NULL;
+
+ if (list_empty(&mm->lru_gen.list))
+ return;
+
+#ifdef CONFIG_MEMCG
+ memcg = mm->lru_gen.memcg;
+#endif
+ mm_list = get_mm_list(memcg);
+
+ spin_lock(&mm_list->lock);
+
+ for_each_node(nid) {
+ struct lruvec *lruvec = get_lruvec(memcg, nid);
+
+ if (!lruvec)
+ continue;
+
+ /* where the last iteration ended (exclusive) */
+ if (lruvec->mm_state.tail == &mm->lru_gen.list)
+ lruvec->mm_state.tail = lruvec->mm_state.tail->next;
+
+ /* where the current iteration continues (inclusive) */
+ if (lruvec->mm_state.head != &mm->lru_gen.list)
+ continue;
+
+ lruvec->mm_state.head = lruvec->mm_state.head->next;
+ /* the deletion ends the current iteration */
+ if (lruvec->mm_state.head == &mm_list->fifo)
+ WRITE_ONCE(lruvec->mm_state.seq, lruvec->mm_state.seq + 1);
+ }
+
+ list_del_init(&mm->lru_gen.list);
+
+ spin_unlock(&mm_list->lock);
+
+#ifdef CONFIG_MEMCG
+ mem_cgroup_put(mm->lru_gen.memcg);
+ mm->lru_gen.memcg = NULL;
+#endif
+}
+
+#ifdef CONFIG_MEMCG
+void lru_gen_migrate_mm(struct mm_struct *mm)
+{
+ struct mem_cgroup *memcg;
+ struct task_struct *task = rcu_dereference_protected(mm->owner, true);
+
+ VM_WARN_ON_ONCE(task->mm != mm);
+ lockdep_assert_held(&task->alloc_lock);
+
+ /* for mm_update_next_owner() */
+ if (mem_cgroup_disabled())
+ return;
+
+ rcu_read_lock();
+ memcg = mem_cgroup_from_task(task);
+ rcu_read_unlock();
+ if (memcg == mm->lru_gen.memcg)
+ return;
+
+ VM_WARN_ON_ONCE(!mm->lru_gen.memcg);
+ VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list));
+
+ lru_gen_del_mm(mm);
+ lru_gen_add_mm(mm);
+}
+#endif
+
+/*
+ * Bloom filters with m=1<<15, k=2 and false positive rates of ~1/5 when
+ * n=10,000 and ~1/2 when n=20,000, where, conventionally, m is the number of
+ * bits in a bitmap, k is the number of hash functions and n is the number of
+ * inserted items.
+ *
+ * Page table walkers use one of the two filters to reduce their search space.
+ * To get rid of non-leaf entries that no longer have enough leaf entries, the
+ * aging uses the double-buffering technique to flip to the other filter each
+ * time it produces a new generation. For non-leaf entries that have enough
+ * leaf entries, the aging carries them over to the next generation in
+ * walk_pmd_range(); the eviction also reports them when walking the rmap
+ * in lru_gen_look_around().
+ *
+ * For future optimizations:
+ * 1. It's not necessary to keep both filters all the time. The spare one can be
+ * freed after the RCU grace period and reallocated if needed again.
+ * 2. And when reallocating, it's worth scaling its size according to the number
+ * of inserted entries in the other filter, to reduce the memory overhead on
+ * small systems and false positives on large systems.
+ * 3. Jenkins' hash function is an alternative to Knuth's.
+ */
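/*
 * The false positive figures quoted above can be sanity-checked with the
 * standard approximation p ~= (1 - e^(-k*n/m))^k, using m = 1 << 15 and
 * k = 2:
 *   n = 10,000: (1 - e^(-20000/32768))^2 ~= 0.46^2 ~= 0.21 ~= 1/5
 *   n = 20,000: (1 - e^(-40000/32768))^2 ~= 0.70^2 ~= 0.50 ~= 1/2
 */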
+#define BLOOM_FILTER_SHIFT 15
+
+static inline int filter_gen_from_seq(unsigned long seq)
+{
+ return seq % NR_BLOOM_FILTERS;
+}
+
+static void get_item_key(void *item, int *key)
+{
+ u32 hash = hash_ptr(item, BLOOM_FILTER_SHIFT * 2);
+
+ BUILD_BUG_ON(BLOOM_FILTER_SHIFT * 2 > BITS_PER_TYPE(u32));
+
+ key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1);
+ key[1] = hash >> BLOOM_FILTER_SHIFT;
+}
+
+static void reset_bloom_filter(struct lruvec *lruvec, unsigned long seq)
+{
+ unsigned long *filter;
+ int gen = filter_gen_from_seq(seq);
+
+ filter = lruvec->mm_state.filters[gen];
+ if (filter) {
+ bitmap_clear(filter, 0, BIT(BLOOM_FILTER_SHIFT));
+ return;
+ }
+
+ filter = bitmap_zalloc(BIT(BLOOM_FILTER_SHIFT),
+ __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ WRITE_ONCE(lruvec->mm_state.filters[gen], filter);
+}
+
+static void update_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
+{
+ int key[2];
+ unsigned long *filter;
+ int gen = filter_gen_from_seq(seq);
+
+ filter = READ_ONCE(lruvec->mm_state.filters[gen]);
+ if (!filter)
+ return;
+
+ get_item_key(item, key);
+
+ if (!test_bit(key[0], filter))
+ set_bit(key[0], filter);
+ if (!test_bit(key[1], filter))
+ set_bit(key[1], filter);
+}
+
+static bool test_bloom_filter(struct lruvec *lruvec, unsigned long seq, void *item)
+{
+ int key[2];
+ unsigned long *filter;
+ int gen = filter_gen_from_seq(seq);
+
+ filter = READ_ONCE(lruvec->mm_state.filters[gen]);
+ if (!filter)
+ return true;
+
+ get_item_key(item, key);
+
+ return test_bit(key[0], filter) && test_bit(key[1], filter);
+}
+
+static void reset_mm_stats(struct lruvec *lruvec, struct lru_gen_mm_walk *walk, bool last)
+{
+ int i;
+ int hist;
+
+ lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock);
+
+ if (walk) {
+ hist = lru_hist_from_seq(walk->max_seq);
+
+ for (i = 0; i < NR_MM_STATS; i++) {
+ WRITE_ONCE(lruvec->mm_state.stats[hist][i],
+ lruvec->mm_state.stats[hist][i] + walk->mm_stats[i]);
+ walk->mm_stats[i] = 0;
+ }
+ }
+
+ if (NR_HIST_GENS > 1 && last) {
+ hist = lru_hist_from_seq(lruvec->mm_state.seq + 1);
+
+ for (i = 0; i < NR_MM_STATS; i++)
+ WRITE_ONCE(lruvec->mm_state.stats[hist][i], 0);
+ }
+}
+
+static bool should_skip_mm(struct mm_struct *mm, struct lru_gen_mm_walk *walk)
+{
+ int type;
+ unsigned long size = 0;
+ struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
+ int key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
+
+ if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap))
+ return true;
+
+ clear_bit(key, &mm->lru_gen.bitmap);
+
+ for (type = !walk->can_swap; type < ANON_AND_FILE; type++) {
+ size += type ? get_mm_counter(mm, MM_FILEPAGES) :
+ get_mm_counter(mm, MM_ANONPAGES) +
+ get_mm_counter(mm, MM_SHMEMPAGES);
+ }
+
+ if (size < MIN_LRU_BATCH)
+ return true;
+
+ return !mmget_not_zero(mm);
+}
+
+static bool iterate_mm_list(struct lruvec *lruvec, struct lru_gen_mm_walk *walk,
+ struct mm_struct **iter)
+{
+ bool first = false;
+ bool last = true;
+ struct mm_struct *mm = NULL;
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
+ struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
+
+ /*
+ * There are four interesting cases for this page table walker:
+ * 1. It tries to start a new iteration of mm_list with a stale max_seq;
+ * there is nothing left to do.
+ * 2. It's the first of the current generation, and it needs to reset
+ * the Bloom filter for the next generation.
+ * 3. It reaches the end of mm_list, and it needs to increment
+ * mm_state->seq; the iteration is done.
+ * 4. It's the last of the current generation, and it needs to reset the
+ * mm stats counters for the next generation.
+ */
+ spin_lock(&mm_list->lock);
+
+ VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->max_seq);
+ VM_WARN_ON_ONCE(*iter && mm_state->seq > walk->max_seq);
+ VM_WARN_ON_ONCE(*iter && !mm_state->nr_walkers);
+
+ if (walk->max_seq <= mm_state->seq) {
+ if (!*iter)
+ last = false;
+ goto done;
+ }
+
+ if (!mm_state->nr_walkers) {
+ VM_WARN_ON_ONCE(mm_state->head && mm_state->head != &mm_list->fifo);
+
+ mm_state->head = mm_list->fifo.next;
+ first = true;
+ }
+
+ while (!mm && mm_state->head != &mm_list->fifo) {
+ mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list);
+
+ mm_state->head = mm_state->head->next;
+
+ /* force scan for those added after the last iteration */
+ if (!mm_state->tail || mm_state->tail == &mm->lru_gen.list) {
+ mm_state->tail = mm_state->head;
+ walk->force_scan = true;
+ }
+
+ if (should_skip_mm(mm, walk))
+ mm = NULL;
+ }
+
+ if (mm_state->head == &mm_list->fifo)
+ WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
+done:
+ if (*iter && !mm)
+ mm_state->nr_walkers--;
+ if (!*iter && mm)
+ mm_state->nr_walkers++;
+
+ if (mm_state->nr_walkers)
+ last = false;
+
+ if (*iter || last)
+ reset_mm_stats(lruvec, walk, last);
+
+ spin_unlock(&mm_list->lock);
+
+ if (mm && first)
+ reset_bloom_filter(lruvec, walk->max_seq + 1);
+
+ if (*iter)
+ mmput_async(*iter);
+
+ *iter = mm;
+
+ return last;
+}
+
+static bool iterate_mm_list_nowalk(struct lruvec *lruvec, unsigned long max_seq)
+{
+ bool success = false;
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ struct lru_gen_mm_list *mm_list = get_mm_list(memcg);
+ struct lru_gen_mm_state *mm_state = &lruvec->mm_state;
+
+ spin_lock(&mm_list->lock);
+
+ VM_WARN_ON_ONCE(mm_state->seq + 1 < max_seq);
+
+ if (max_seq > mm_state->seq && !mm_state->nr_walkers) {
+ VM_WARN_ON_ONCE(mm_state->head && mm_state->head != &mm_list->fifo);
+
+ WRITE_ONCE(mm_state->seq, mm_state->seq + 1);
+ reset_mm_stats(lruvec, NULL, true);
+ success = true;
+ }
+
+ spin_unlock(&mm_list->lock);
+
+ return success;
+}
+
+/******************************************************************************
+ * refault feedback loop
+ ******************************************************************************/
+
+/*
+ * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
+ *
+ * The P term is refaulted/(evicted+protected) from a tier in the generation
+ * currently being evicted; the I term is the exponential moving average of the
+ * P term over the generations previously evicted, using the smoothing factor
+ * 1/2; the D term isn't supported.
+ *
+ * The setpoint (SP) is always the first tier of one type; the process variable
+ * (PV) is either any tier of the other type or any other tier of the same
+ * type.
+ *
+ * The error is the difference between the SP and the PV; the correction is to
+ * turn off protection when SP>PV or turn on protection when SP<PV.
+ *
+ * For future optimizations:
+ * 1. The D term may discount the other two terms over time so that long-lived
+ * generations can resist stale information.
+ */
+struct ctrl_pos {
+ unsigned long refaulted;
+ unsigned long total;
+ int gain;
+};
+
+static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain,
+ struct ctrl_pos *pos)
+{
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ int hist = lru_hist_from_seq(lrugen->min_seq[type]);
+
+ pos->refaulted = lrugen->avg_refaulted[type][tier] +
+ atomic_long_read(&lrugen->refaulted[hist][type][tier]);
+ pos->total = lrugen->avg_total[type][tier] +
+ atomic_long_read(&lrugen->evicted[hist][type][tier]);
+ if (tier)
+ pos->total += lrugen->protected[hist][type][tier - 1];
+ pos->gain = gain;
+}
+
+static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover)
+{
+ int hist, tier;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1;
+ unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1;
+
+ lockdep_assert_held(&lruvec->lru_lock);
+
+ if (!carryover && !clear)
+ return;
+
+ hist = lru_hist_from_seq(seq);
+
+ for (tier = 0; tier < MAX_NR_TIERS; tier++) {
+ if (carryover) {
+ unsigned long sum;
+
+ sum = lrugen->avg_refaulted[type][tier] +
+ atomic_long_read(&lrugen->refaulted[hist][type][tier]);
+ WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2);
+
+ sum = lrugen->avg_total[type][tier] +
+ atomic_long_read(&lrugen->evicted[hist][type][tier]);
+ if (tier)
+ sum += lrugen->protected[hist][type][tier - 1];
+ WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2);
+ }
+
+ if (clear) {
+ atomic_long_set(&lrugen->refaulted[hist][type][tier], 0);
+ atomic_long_set(&lrugen->evicted[hist][type][tier], 0);
+ if (tier)
+ WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0);
+ }
+ }
+}
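/*
 * The carryover path above is the I term's exponential moving average with
 * a smoothing factor of 1/2, i.e. avg = (avg + new) / 2. A small worked run
 * with hypothetical per-generation refault counts of 40, 80 and 20 gives
 * avg_refaulted = (0 + 40) / 2 = 20, (20 + 80) / 2 = 50, (50 + 20) / 2 = 35.
 */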
+
+static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
+{
+ /*
+ * Return true if the PV has a limited number of refaults or a lower
+ * refaulted/total than the SP.
+ */
+ return pv->refaulted < MIN_LRU_BATCH ||
+ pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
+ (sp->refaulted + 1) * pv->total * pv->gain;
+}
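/*
 * A worked instance of the comparison above, with made-up counters and
 * MIN_LRU_BATCH taken as 64 for illustration:
 *   sp = { .refaulted = 100, .total = 1000, .gain = 1 }
 *   pv = { .refaulted = 300, .total = 1000, .gain = 2 }
 *   300 * (1000 + 64) * 1 = 319200  >  (100 + 1) * 1000 * 2 = 202000
 * so positive_ctrl_err() returns false: the PV refaults more than twice as
 * often as the SP and should keep its protection.
 */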
+
+/******************************************************************************
+ * the aging
+ ******************************************************************************/
+
+/* promote pages accessed through page tables */
+static int folio_update_gen(struct folio *folio, int gen)
+{
+ unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+
+ VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(!rcu_read_lock_held());
+
+ do {
+ /* lru_gen_del_folio() has isolated this page? */
+ if (!(old_flags & LRU_GEN_MASK)) {
+ /* for shrink_folio_list() */
+ new_flags = old_flags | BIT(PG_referenced);
+ continue;
+ }
+
+ new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
+ new_flags |= (gen + 1UL) << LRU_GEN_PGOFF;
+ } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+
+ return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+}
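/*
 * Note on the encoding used above: the LRU_GEN_MASK field stores gen + 1,
 * so a value of 0 means the folio is not on a multi-gen LRU list. That is
 * why (gen + 1) << LRU_GEN_PGOFF is stored and the return value subtracts 1.
 */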
+
+/* protect pages accessed multiple times through file descriptors */
+static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
+{
+ int type = folio_is_file_lru(folio);
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
+ unsigned long new_flags, old_flags = READ_ONCE(folio->flags);
+
+ VM_WARN_ON_ONCE_FOLIO(!(old_flags & LRU_GEN_MASK), folio);
+
+ do {
+ new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
+ /* folio_update_gen() has promoted this page? */
+ if (new_gen >= 0 && new_gen != old_gen)
+ return new_gen;
+
+ new_gen = (old_gen + 1) % MAX_NR_GENS;
+
+ new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
+ new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF;
+ /* for folio_end_writeback() */
+ if (reclaiming)
+ new_flags |= BIT(PG_reclaim);
+ } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
+
+ lru_gen_update_size(lruvec, folio, old_gen, new_gen);
+
+ return new_gen;
+}
+
+static void update_batch_size(struct lru_gen_mm_walk *walk, struct folio *folio,
+ int old_gen, int new_gen)
+{
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ int delta = folio_nr_pages(folio);
+
+ VM_WARN_ON_ONCE(old_gen >= MAX_NR_GENS);
+ VM_WARN_ON_ONCE(new_gen >= MAX_NR_GENS);
+
+ walk->batched++;
+
+ walk->nr_pages[old_gen][type][zone] -= delta;
+ walk->nr_pages[new_gen][type][zone] += delta;
+}
+
+static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk)
+{
+ int gen, type, zone;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ walk->batched = 0;
+
+ for_each_gen_type_zone(gen, type, zone) {
+ enum lru_list lru = type * LRU_INACTIVE_FILE;
+ int delta = walk->nr_pages[gen][type][zone];
+
+ if (!delta)
+ continue;
+
+ walk->nr_pages[gen][type][zone] = 0;
+ WRITE_ONCE(lrugen->nr_pages[gen][type][zone],
+ lrugen->nr_pages[gen][type][zone] + delta);
+
+ if (lru_gen_is_active(lruvec, gen))
+ lru += LRU_ACTIVE;
+ __update_lru_size(lruvec, lru, zone, delta);
+ }
+}
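/*
 * On the lru index math above (assuming the usual enum lru_list layout):
 * type * LRU_INACTIVE_FILE maps anon (0) to LRU_INACTIVE_ANON and file (1)
 * to LRU_INACTIVE_FILE, and adding LRU_ACTIVE selects the corresponding
 * active list when the generation counts as active.
 */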
+
+static int should_skip_vma(unsigned long start, unsigned long end, struct mm_walk *args)
+{
+ struct address_space *mapping;
+ struct vm_area_struct *vma = args->vma;
+ struct lru_gen_mm_walk *walk = args->private;
+
+ if (!vma_is_accessible(vma))
+ return true;
+
+ if (is_vm_hugetlb_page(vma))
+ return true;
+
+ if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL | VM_SEQ_READ | VM_RAND_READ))
+ return true;
+
+ if (vma == get_gate_vma(vma->vm_mm))
+ return true;
+
+ if (vma_is_anonymous(vma))
+ return !walk->can_swap;
+
+ if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping))
+ return true;
+
+ mapping = vma->vm_file->f_mapping;
+ if (mapping_unevictable(mapping))
+ return true;
+
+ if (shmem_mapping(mapping))
+ return !walk->can_swap;
+
+ /* to exclude special mappings like dax, etc. */
+ return !mapping->a_ops->read_folio;
+}
+
+/*
+ * Some userspace memory allocators map many single-page VMAs. Instead of
+ * returning to the PGD table for each such VMA, finish an entire PMD
+ * table to reduce zigzags and improve cache performance.
+ */
+static bool get_next_vma(unsigned long mask, unsigned long size, struct mm_walk *args,
+ unsigned long *vm_start, unsigned long *vm_end)
+{
+ unsigned long start = round_up(*vm_end, size);
+ unsigned long end = (start | ~mask) + 1;
+ VMA_ITERATOR(vmi, args->mm, start);
+
+ VM_WARN_ON_ONCE(mask & size);
+ VM_WARN_ON_ONCE((start & mask) != (*vm_start & mask));
+
+ for_each_vma(vmi, args->vma) {
+ if (end && end <= args->vma->vm_start)
+ return false;
+
+ if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args))
+ continue;
+
+ *vm_start = max(start, args->vma->vm_start);
+ *vm_end = min(end - 1, args->vma->vm_end - 1) + 1;
+
+ return true;
+ }
+
+ return false;
+}
+
+static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr)
+{
+ unsigned long pfn = pte_pfn(pte);
+
+ VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
+
+ if (!pte_present(pte) || is_zero_pfn(pfn))
+ return -1;
+
+ if (WARN_ON_ONCE(pte_devmap(pte) || pte_special(pte)))
+ return -1;
+
+ if (WARN_ON_ONCE(!pfn_valid(pfn)))
+ return -1;
+
+ return pfn;
+}
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
+static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr)
+{
+ unsigned long pfn = pmd_pfn(pmd);
+
+ VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end);
+
+ if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
+ return -1;
+
+ if (WARN_ON_ONCE(pmd_devmap(pmd)))
+ return -1;
+
+ if (WARN_ON_ONCE(!pfn_valid(pfn)))
+ return -1;
+
+ return pfn;
+}
+#endif
+
+static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg,
+ struct pglist_data *pgdat, bool can_swap)
+{
+ struct folio *folio;
+
+ /* try to avoid unnecessary memory loads */
+ if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
+ return NULL;
+
+ folio = pfn_folio(pfn);
+ if (folio_nid(folio) != pgdat->node_id)
+ return NULL;
+
+ if (folio_memcg_rcu(folio) != memcg)
+ return NULL;
+
+ /* file VMAs can contain anon pages from COW */
+ if (!folio_is_file_lru(folio) && !can_swap)
+ return NULL;
+
+ return folio;
+}
+
+static bool suitable_to_scan(int total, int young)
+{
+ int n = clamp_t(int, cache_line_size() / sizeof(pte_t), 2, 8);
+
+ /* suitable if the average number of young PTEs per cacheline is >=1 */
+ return young * n >= total;
+}
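/*
 * For example, with 64-byte cachelines and 8-byte PTEs (typical but not
 * guaranteed), n is clamped to 8, so a PTE table is suitable only if at
 * least one in eight scanned PTEs was young:
 *   total = 512, young = 64: 64 * 8 = 512 >= 512 -> suitable
 *   total = 512, young = 63: 63 * 8 = 504 <  512 -> not suitable
 */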
+
+static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
+ struct mm_walk *args)
+{
+ int i;
+ pte_t *pte;
+ spinlock_t *ptl;
+ unsigned long addr;
+ int total = 0;
+ int young = 0;
+ struct lru_gen_mm_walk *walk = args->private;
+ struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
+ struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
+ int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
+
+ VM_WARN_ON_ONCE(pmd_leaf(*pmd));
+
+ ptl = pte_lockptr(args->mm, pmd);
+ if (!spin_trylock(ptl))
+ return false;
+
+ arch_enter_lazy_mmu_mode();
+
+ pte = pte_offset_map(pmd, start & PMD_MASK);
+restart:
+ for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
+ unsigned long pfn;
+ struct folio *folio;
+
+ total++;
+ walk->mm_stats[MM_LEAF_TOTAL]++;
+
+ pfn = get_pte_pfn(pte[i], args->vma, addr);
+ if (pfn == -1)
+ continue;
+
+ if (!pte_young(pte[i])) {
+ walk->mm_stats[MM_LEAF_OLD]++;
+ continue;
+ }
+
+ folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
+ if (!folio)
+ continue;
+
+ if (!ptep_test_and_clear_young(args->vma, addr, pte + i))
+ VM_WARN_ON_ONCE(true);
+
+ young++;
+ walk->mm_stats[MM_LEAF_YOUNG]++;
+
+ if (pte_dirty(pte[i]) && !folio_test_dirty(folio) &&
+ !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+ !folio_test_swapcache(folio)))
+ folio_mark_dirty(folio);
+
+ old_gen = folio_update_gen(folio, new_gen);
+ if (old_gen >= 0 && old_gen != new_gen)
+ update_batch_size(walk, folio, old_gen, new_gen);
+ }
+
+ if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
+ goto restart;
+
+ pte_unmap(pte);
+
+ arch_leave_lazy_mmu_mode();
+ spin_unlock(ptl);
+
+ return suitable_to_scan(total, young);
+}
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
+static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area_struct *vma,
+ struct mm_walk *args, unsigned long *bitmap, unsigned long *start)
+{
+ int i;
+ pmd_t *pmd;
+ spinlock_t *ptl;
+ struct lru_gen_mm_walk *walk = args->private;
+ struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec);
+ struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
+ int old_gen, new_gen = lru_gen_from_seq(walk->max_seq);
+
+ VM_WARN_ON_ONCE(pud_leaf(*pud));
+
+ /* try to batch at most 1+MIN_LRU_BATCH+1 entries */
+ if (*start == -1) {
+ *start = next;
+ return;
+ }
+
+ i = next == -1 ? 0 : pmd_index(next) - pmd_index(*start);
+ if (i && i <= MIN_LRU_BATCH) {
+ __set_bit(i - 1, bitmap);
+ return;
+ }
+
+ pmd = pmd_offset(pud, *start);
+
+ ptl = pmd_lockptr(args->mm, pmd);
+ if (!spin_trylock(ptl))
+ goto done;
+
+ arch_enter_lazy_mmu_mode();
+
+ do {
+ unsigned long pfn;
+ struct folio *folio;
+ unsigned long addr = i ? (*start & PMD_MASK) + i * PMD_SIZE : *start;
+
+ pfn = get_pmd_pfn(pmd[i], vma, addr);
+ if (pfn == -1)
+ goto next;
+
+ if (!pmd_trans_huge(pmd[i])) {
+ if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) &&
+ get_cap(LRU_GEN_NONLEAF_YOUNG))
+ pmdp_test_and_clear_young(vma, addr, pmd + i);
+ goto next;
+ }
+
+ folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
+ if (!folio)
+ goto next;
+
+ if (!pmdp_test_and_clear_young(vma, addr, pmd + i))
+ goto next;
+
+ walk->mm_stats[MM_LEAF_YOUNG]++;
+
+ if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) &&
+ !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+ !folio_test_swapcache(folio)))
+ folio_mark_dirty(folio);
+
+ old_gen = folio_update_gen(folio, new_gen);
+ if (old_gen >= 0 && old_gen != new_gen)
+ update_batch_size(walk, folio, old_gen, new_gen);
+next:
+ i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1;
+ } while (i <= MIN_LRU_BATCH);
+
+ arch_leave_lazy_mmu_mode();
+ spin_unlock(ptl);
+done:
+ *start = -1;
+ bitmap_zero(bitmap, MIN_LRU_BATCH);
+}
+#else
+static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area_struct *vma,
+ struct mm_walk *args, unsigned long *bitmap, unsigned long *start)
+{
+}
+#endif
+
+static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end,
+ struct mm_walk *args)
+{
+ int i;
+ pmd_t *pmd;
+ unsigned long next;
+ unsigned long addr;
+ struct vm_area_struct *vma;
+ unsigned long pos = -1;
+ struct lru_gen_mm_walk *walk = args->private;
+ unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
+
+ VM_WARN_ON_ONCE(pud_leaf(*pud));
+
+ /*
+ * Finish an entire PMD in two passes: the first only reaches to PTE
+ * tables to avoid taking the PMD lock; the second, if necessary, takes
+ * the PMD lock to clear the accessed bit in PMD entries.
+ */
+ pmd = pmd_offset(pud, start & PUD_MASK);
+restart:
+ /* walk_pte_range() may call get_next_vma() */
+ vma = args->vma;
+ for (i = pmd_index(start), addr = start; addr != end; i++, addr = next) {
+ pmd_t val = pmd_read_atomic(pmd + i);
+
+ /* for pmd_read_atomic() */
+ barrier();
+
+ next = pmd_addr_end(addr, end);
+
+ if (!pmd_present(val) || is_huge_zero_pmd(val)) {
+ walk->mm_stats[MM_LEAF_TOTAL]++;
+ continue;
+ }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ if (pmd_trans_huge(val)) {
+ unsigned long pfn = pmd_pfn(val);
+ struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
+
+ walk->mm_stats[MM_LEAF_TOTAL]++;
+
+ if (!pmd_young(val)) {
+ walk->mm_stats[MM_LEAF_OLD]++;
+ continue;
+ }
+
+ /* try to avoid unnecessary memory loads */
+ if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
+ continue;
+
+ walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
+ continue;
+ }
+#endif
+ walk->mm_stats[MM_NONLEAF_TOTAL]++;
+
+#ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG
+ if (get_cap(LRU_GEN_NONLEAF_YOUNG)) {
+ if (!pmd_young(val))
+ continue;
+
+ walk_pmd_range_locked(pud, addr, vma, args, bitmap, &pos);
+ }
+#endif
+ if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i))
+ continue;
+
+ walk->mm_stats[MM_NONLEAF_FOUND]++;
+
+ if (!walk_pte_range(&val, addr, next, args))
+ continue;
+
+ walk->mm_stats[MM_NONLEAF_ADDED]++;
+
+ /* carry over to the next generation */
+ update_bloom_filter(walk->lruvec, walk->max_seq + 1, pmd + i);
+ }
+
+ walk_pmd_range_locked(pud, -1, vma, args, bitmap, &pos);
+
+ if (i < PTRS_PER_PMD && get_next_vma(PUD_MASK, PMD_SIZE, args, &start, &end))
+ goto restart;
+}
+
+static int walk_pud_range(p4d_t *p4d, unsigned long start, unsigned long end,
+ struct mm_walk *args)
+{
+ int i;
+ pud_t *pud;
+ unsigned long addr;
+ unsigned long next;
+ struct lru_gen_mm_walk *walk = args->private;
+
+ VM_WARN_ON_ONCE(p4d_leaf(*p4d));
+
+ pud = pud_offset(p4d, start & P4D_MASK);
+restart:
+ for (i = pud_index(start), addr = start; addr != end; i++, addr = next) {
+ pud_t val = READ_ONCE(pud[i]);
+
+ next = pud_addr_end(addr, end);
+
+ if (!pud_present(val) || WARN_ON_ONCE(pud_leaf(val)))
+ continue;
+
+ walk_pmd_range(&val, addr, next, args);
+
+ /* a racy check to curtail the waiting time */
+ if (wq_has_sleeper(&walk->lruvec->mm_state.wait))
+ return 1;
+
+ if (need_resched() || walk->batched >= MAX_LRU_BATCH) {
+ end = (addr | ~PUD_MASK) + 1;
+ goto done;
+ }
+ }
+
+ if (i < PTRS_PER_PUD && get_next_vma(P4D_MASK, PUD_SIZE, args, &start, &end))
+ goto restart;
+
+ end = round_up(end, P4D_SIZE);
+done:
+ if (!end || !args->vma)
+ return 1;
+
+ walk->next_addr = max(end, args->vma->vm_start);
+
+ return -EAGAIN;
+}
+
+static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_mm_walk *walk)
+{
+ static const struct mm_walk_ops mm_walk_ops = {
+ .test_walk = should_skip_vma,
+ .p4d_entry = walk_pud_range,
+ };
+
+ int err;
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+
+ walk->next_addr = FIRST_USER_ADDRESS;
+
+ do {
+ err = -EBUSY;
+
+ /* folio_update_gen() requires stable folio_memcg() */
+ if (!mem_cgroup_trylock_pages(memcg))
+ break;
+
+ /* the caller might be holding the lock for write */
+ if (mmap_read_trylock(mm)) {
+ err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk);
+
+ mmap_read_unlock(mm);
+ }
+
+ mem_cgroup_unlock_pages();
+
+ if (walk->batched) {
+ spin_lock_irq(&lruvec->lru_lock);
+ reset_batch_size(lruvec, walk);
+ spin_unlock_irq(&lruvec->lru_lock);
+ }
+
+ cond_resched();
+ } while (err == -EAGAIN);
+}
+
+static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat)
+{
+ struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
+
+ if (pgdat && current_is_kswapd()) {
+ VM_WARN_ON_ONCE(walk);
+
+ walk = &pgdat->mm_walk;
+ } else if (!pgdat && !walk) {
+ VM_WARN_ON_ONCE(current_is_kswapd());
+
+ walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ }
+
+ current->reclaim_state->mm_walk = walk;
+
+ return walk;
+}
+
+static void clear_mm_walk(void)
+{
+ struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk;
+
+ VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages)));
+ VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats)));
+
+ current->reclaim_state->mm_walk = NULL;
+
+ if (!current_is_kswapd())
+ kfree(walk);
+}
+
+static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
+{
+ int zone;
+ int remaining = MAX_LRU_BATCH;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]);
+
+ if (type == LRU_GEN_ANON && !can_swap)
+ goto done;
+
+ /* prevent cold/hot inversion if force_scan is true */
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+ struct list_head *head = &lrugen->lists[old_gen][type][zone];
+
+ while (!list_empty(head)) {
+ struct folio *folio = lru_to_folio(head);
+
+ VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
+
+ new_gen = folio_inc_gen(lruvec, folio, false);
+ list_move_tail(&folio->lru, &lrugen->lists[new_gen][type][zone]);
+
+ if (!--remaining)
+ return false;
+ }
+ }
+done:
+ reset_ctrl_pos(lruvec, type, true);
+ WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1);
+
+ return true;
+}
+
+static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
+{
+ int gen, type, zone;
+ bool success = false;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ DEFINE_MIN_SEQ(lruvec);
+
+ VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
+
+ /* find the oldest populated generation */
+ for (type = !can_swap; type < ANON_AND_FILE; type++) {
+ while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) {
+ gen = lru_gen_from_seq(min_seq[type]);
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+ if (!list_empty(&lrugen->lists[gen][type][zone]))
+ goto next;
+ }
+
+ min_seq[type]++;
+ }
+next:
+ ;
+ }
+
+ /* see the comment on lru_gen_struct */
+ if (can_swap) {
+ min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]);
+ min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]);
+ }
+
+ for (type = !can_swap; type < ANON_AND_FILE; type++) {
+ if (min_seq[type] == lrugen->min_seq[type])
+ continue;
+
+ reset_ctrl_pos(lruvec, type, true);
+ WRITE_ONCE(lrugen->min_seq[type], min_seq[type]);
+ success = true;
+ }
+
+ return success;
+}
+
+static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan)
+{
+ int prev, next;
+ int type, zone;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ spin_lock_irq(&lruvec->lru_lock);
+
+ VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
+
+ for (type = ANON_AND_FILE - 1; type >= 0; type--) {
+ if (get_nr_gens(lruvec, type) != MAX_NR_GENS)
+ continue;
+
+ VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap));
+
+ while (!inc_min_seq(lruvec, type, can_swap)) {
+ spin_unlock_irq(&lruvec->lru_lock);
+ cond_resched();
+ spin_lock_irq(&lruvec->lru_lock);
+ }
+ }
+
+ /*
+ * Update the active/inactive LRU sizes for compatibility. Both sides of
+ * the current max_seq need to be covered, since max_seq+1 can overlap
+ * with min_seq[LRU_GEN_ANON] if swapping is constrained. And if they do
+ * overlap, cold/hot inversion happens.
+ */
+ prev = lru_gen_from_seq(lrugen->max_seq - 1);
+ next = lru_gen_from_seq(lrugen->max_seq + 1);
+
+ for (type = 0; type < ANON_AND_FILE; type++) {
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+ enum lru_list lru = type * LRU_INACTIVE_FILE;
+ long delta = lrugen->nr_pages[prev][type][zone] -
+ lrugen->nr_pages[next][type][zone];
+
+ if (!delta)
+ continue;
+
+ __update_lru_size(lruvec, lru, zone, delta);
+ __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta);
+ }
+ }
+
+ for (type = 0; type < ANON_AND_FILE; type++)
+ reset_ctrl_pos(lruvec, type, false);
+
+ WRITE_ONCE(lrugen->timestamps[next], jiffies);
+ /* make sure preceding modifications appear */
+ smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1);
+
+ spin_unlock_irq(&lruvec->lru_lock);
+}
+
+static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
+ struct scan_control *sc, bool can_swap, bool force_scan)
+{
+ bool success;
+ struct lru_gen_mm_walk *walk;
+ struct mm_struct *mm = NULL;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq));
+
+ /* see the comment in iterate_mm_list() */
+ if (max_seq <= READ_ONCE(lruvec->mm_state.seq)) {
+ success = false;
+ goto done;
+ }
+
+ /*
+ * If the hardware doesn't automatically set the accessed bit, fall back
+ * to lru_gen_look_around(), which only clears the accessed bit in a
+ * handful of PTEs. Spreading the work out over a period of time usually
+ * is less efficient, but it avoids bursty page faults.
+ */
+ if (!force_scan && !(arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))) {
+ success = iterate_mm_list_nowalk(lruvec, max_seq);
+ goto done;
+ }
+
+ walk = set_mm_walk(NULL);
+ if (!walk) {
+ success = iterate_mm_list_nowalk(lruvec, max_seq);
+ goto done;
+ }
+
+ walk->lruvec = lruvec;
+ walk->max_seq = max_seq;
+ walk->can_swap = can_swap;
+ walk->force_scan = force_scan;
+
+ do {
+ success = iterate_mm_list(lruvec, walk, &mm);
+ if (mm)
+ walk_mm(lruvec, mm, walk);
+
+ cond_resched();
+ } while (mm);
+done:
+ if (!success) {
+ if (sc->priority <= DEF_PRIORITY - 2)
+ wait_event_killable(lruvec->mm_state.wait,
+ max_seq < READ_ONCE(lrugen->max_seq));
+
+ return max_seq < READ_ONCE(lrugen->max_seq);
+ }
+
+ VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq));
+
+ inc_max_seq(lruvec, can_swap, force_scan);
+ /* either this sees any waiters or they will see updated max_seq */
+ if (wq_has_sleeper(&lruvec->mm_state.wait))
+ wake_up_all(&lruvec->mm_state.wait);
+
+ return true;
+}
+
+static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq,
+ struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
+{
+ int gen, type, zone;
+ unsigned long old = 0;
+ unsigned long young = 0;
+ unsigned long total = 0;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+
+ for (type = !can_swap; type < ANON_AND_FILE; type++) {
+ unsigned long seq;
+
+ for (seq = min_seq[type]; seq <= max_seq; seq++) {
+ unsigned long size = 0;
+
+ gen = lru_gen_from_seq(seq);
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++)
+ size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+
+ total += size;
+ if (seq == max_seq)
+ young += size;
+ else if (seq + MIN_NR_GENS == max_seq)
+ old += size;
+ }
+ }
+
+ /* try to scrape all its memory if this memcg was deleted */
+ *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
+
+ /*
+ * The aging tries to be lazy to reduce the overhead, while the eviction
+ * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
+ * ideal number of generations is MIN_NR_GENS+1.
+ */
+ if (min_seq[!can_swap] + MIN_NR_GENS > max_seq)
+ return true;
+ if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
+ return false;
+
+ /*
+ * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
+ * of the total number of pages for each generation. A reasonable range
+ * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
+ * aging cares about the upper bound of hot pages, while the eviction
+ * cares about the lower bound of cold pages.
+ */
+ if (young * MIN_NR_GENS > total)
+ return true;
+ if (old * (MIN_NR_GENS + 2) < total)
+ return true;
+
+ return false;
+}
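/*
 * Putting numbers to the two balance checks above (assuming the usual
 * MIN_NR_GENS = 2), with exactly MIN_NR_GENS + 1 = 3 generations and 1200
 * pages in total: aging is requested if the youngest generation exceeds
 * 1200 / 2 = 600 pages or the oldest drops below 1200 / 4 = 300 pages;
 * otherwise the eviction can proceed without creating a new generation.
 */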
+
+static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned long min_ttl)
+{
+ bool need_aging;
+ unsigned long nr_to_scan;
+ int swappiness = get_swappiness(lruvec, sc);
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ DEFINE_MAX_SEQ(lruvec);
+ DEFINE_MIN_SEQ(lruvec);
+
+ VM_WARN_ON_ONCE(sc->memcg_low_reclaim);
+
+ mem_cgroup_calculate_protection(NULL, memcg);
+
+ if (mem_cgroup_below_min(memcg))
+ return false;
+
+ need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan);
+
+ if (min_ttl) {
+ int gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]);
+ unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
+
+ if (time_is_after_jiffies(birth + min_ttl))
+ return false;
+
+ /* the size is likely too small to be helpful */
+ if (!nr_to_scan && sc->priority != DEF_PRIORITY)
+ return false;
+ }
+
+ if (need_aging)
+ try_to_inc_max_seq(lruvec, max_seq, sc, swappiness, false);
+
+ return true;
+}
+
+/* to protect the working set of the last N jiffies */
+static unsigned long lru_gen_min_ttl __read_mostly;
+
+static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
+{
+ struct mem_cgroup *memcg;
+ bool success = false;
+ unsigned long min_ttl = READ_ONCE(lru_gen_min_ttl);
+
+ VM_WARN_ON_ONCE(!current_is_kswapd());
+
+ sc->last_reclaimed = sc->nr_reclaimed;
+
+ /*
+ * To reduce the chance of going into the aging path, which can be
+ * costly, optimistically skip it if the flag below was cleared in the
+ * eviction path. This improves the overall performance when multiple
+ * memcgs are available.
+ */
+ if (!sc->memcgs_need_aging) {
+ sc->memcgs_need_aging = true;
+ return;
+ }
+
+ set_mm_walk(pgdat);
+
+ memcg = mem_cgroup_iter(NULL, NULL, NULL);
+ do {
+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
+
+ if (age_lruvec(lruvec, sc, min_ttl))
+ success = true;
+
+ cond_resched();
+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+
+ clear_mm_walk();
+
+ /* check the order to exclude compaction-induced reclaim */
+ if (success || !min_ttl || sc->order)
+ return;
+
+ /*
+ * The main goal is to OOM kill if every generation from all memcgs is
+ * younger than min_ttl. However, another possibility is that all memcgs are
+ * either below min or empty.
+ */
+ if (mutex_trylock(&oom_lock)) {
+ struct oom_control oc = {
+ .gfp_mask = sc->gfp_mask,
+ };
+
+ out_of_memory(&oc);
+
+ mutex_unlock(&oom_lock);
+ }
+}
+
+/*
+ * This function exploits spatial locality when shrink_folio_list() walks the
+ * rmap. It scans the adjacent PTEs of a young PTE and promotes hot pages. If
+ * the scan was cacheline efficient, it adds the PMD entry pointing to
+ * the PTE table to the Bloom filter. This forms a feedback loop between the
+ * eviction and the aging.
+ */
+void lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
+{
+ int i;
+ pte_t *pte;
+ unsigned long start;
+ unsigned long end;
+ unsigned long addr;
+ struct lru_gen_mm_walk *walk;
+ int young = 0;
+ unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {};
+ struct folio *folio = pfn_folio(pvmw->pfn);
+ struct mem_cgroup *memcg = folio_memcg(folio);
+ struct pglist_data *pgdat = folio_pgdat(folio);
+ struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ DEFINE_MAX_SEQ(lruvec);
+ int old_gen, new_gen = lru_gen_from_seq(max_seq);
+
+ lockdep_assert_held(pvmw->ptl);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio);
+
+ if (spin_is_contended(pvmw->ptl))
+ return;
+
+ /* avoid taking the LRU lock under the PTL when possible */
+ walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
+
+ start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start);
+ end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
+
+ if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
+ if (pvmw->address - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
+ end = start + MIN_LRU_BATCH * PAGE_SIZE;
+ else if (end - pvmw->address < MIN_LRU_BATCH * PAGE_SIZE / 2)
+ start = end - MIN_LRU_BATCH * PAGE_SIZE;
+ else {
+ start = pvmw->address - MIN_LRU_BATCH * PAGE_SIZE / 2;
+ end = pvmw->address + MIN_LRU_BATCH * PAGE_SIZE / 2;
+ }
+ }
+
+ pte = pvmw->pte - (pvmw->address - start) / PAGE_SIZE;
+
+ rcu_read_lock();
+ arch_enter_lazy_mmu_mode();
+
+ for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) {
+ unsigned long pfn;
+
+ pfn = get_pte_pfn(pte[i], pvmw->vma, addr);
+ if (pfn == -1)
+ continue;
+
+ if (!pte_young(pte[i]))
+ continue;
+
+ folio = get_pfn_folio(pfn, memcg, pgdat, !walk || walk->can_swap);
+ if (!folio)
+ continue;
+
+ if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
+ VM_WARN_ON_ONCE(true);
+
+ young++;
+
+ if (pte_dirty(pte[i]) && !folio_test_dirty(folio) &&
+ !(folio_test_anon(folio) && folio_test_swapbacked(folio) &&
+ !folio_test_swapcache(folio)))
+ folio_mark_dirty(folio);
+
+ old_gen = folio_lru_gen(folio);
+ if (old_gen < 0)
+ folio_set_referenced(folio);
+ else if (old_gen != new_gen)
+ __set_bit(i, bitmap);
+ }
+
+ arch_leave_lazy_mmu_mode();
+ rcu_read_unlock();
+
+ /* feedback from rmap walkers to page table walkers */
+ if (suitable_to_scan(i, young))
+ update_bloom_filter(lruvec, max_seq, pvmw->pmd);
+
+ if (!walk && bitmap_weight(bitmap, MIN_LRU_BATCH) < PAGEVEC_SIZE) {
+ for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
+ folio = pfn_folio(pte_pfn(pte[i]));
+ folio_activate(folio);
+ }
+ return;
+ }
+
+ /* folio_update_gen() requires stable folio_memcg() */
+ if (!mem_cgroup_trylock_pages(memcg))
+ return;
+
+ if (!walk) {
+ spin_lock_irq(&lruvec->lru_lock);
+ new_gen = lru_gen_from_seq(lruvec->lrugen.max_seq);
+ }
+
+ for_each_set_bit(i, bitmap, MIN_LRU_BATCH) {
+ folio = pfn_folio(pte_pfn(pte[i]));
+ if (folio_memcg_rcu(folio) != memcg)
+ continue;
+
+ old_gen = folio_update_gen(folio, new_gen);
+ if (old_gen < 0 || old_gen == new_gen)
+ continue;
+
+ if (walk)
+ update_batch_size(walk, folio, old_gen, new_gen);
+ else
+ lru_gen_update_size(lruvec, folio, old_gen, new_gen);
+ }
+
+ if (!walk)
+ spin_unlock_irq(&lruvec->lru_lock);
+
+ mem_cgroup_unlock_pages();
+}
+
+/******************************************************************************
+ * the eviction
+ ******************************************************************************/
+
+static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx)
+{
+ bool success;
+ int gen = folio_lru_gen(folio);
+ int type = folio_is_file_lru(folio);
+ int zone = folio_zonenum(folio);
+ int delta = folio_nr_pages(folio);
+ int refs = folio_lru_refs(folio);
+ int tier = lru_tier_from_refs(refs);
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio);
+
+ /* unevictable */
+ if (!folio_evictable(folio)) {
+ success = lru_gen_del_folio(lruvec, folio, true);
+ VM_WARN_ON_ONCE_FOLIO(!success, folio);
+ folio_set_unevictable(folio);
+ lruvec_add_folio(lruvec, folio);
+ __count_vm_events(UNEVICTABLE_PGCULLED, delta);
+ return true;
+ }
+
+ /* dirty lazyfree */
+ if (type == LRU_GEN_FILE && folio_test_anon(folio) && folio_test_dirty(folio)) {
+ success = lru_gen_del_folio(lruvec, folio, true);
+ VM_WARN_ON_ONCE_FOLIO(!success, folio);
+ folio_set_swapbacked(folio);
+ lruvec_add_folio_tail(lruvec, folio);
+ return true;
+ }
+
+ /* promoted */
+ if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
+ list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
+ return true;
+ }
+
+ /* protected */
+ if (tier > tier_idx) {
+ int hist = lru_hist_from_seq(lrugen->min_seq[type]);
+
+ gen = folio_inc_gen(lruvec, folio, false);
+ list_move_tail(&folio->lru, &lrugen->lists[gen][type][zone]);
+
+ WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
+ lrugen->protected[hist][type][tier - 1] + delta);
+ __mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta);
+ return true;
+ }
+
+ /* waiting for writeback */
+ if (folio_test_locked(folio) || folio_test_writeback(folio) ||
+ (type == LRU_GEN_FILE && folio_test_dirty(folio))) {
+ gen = folio_inc_gen(lruvec, folio, true);
+ list_move(&folio->lru, &lrugen->lists[gen][type][zone]);
+ return true;
+ }
+
+ return false;
+}
+
+static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc)
+{
+ bool success;
+
+ /* unmapping inhibited */
+ if (!sc->may_unmap && folio_mapped(folio))
+ return false;
+
+ /* swapping inhibited */
+ if (!(sc->may_writepage && (sc->gfp_mask & __GFP_IO)) &&
+ (folio_test_dirty(folio) ||
+ (folio_test_anon(folio) && !folio_test_swapcache(folio))))
+ return false;
+
+ /* raced with release_pages() */
+ if (!folio_try_get(folio))
+ return false;
+
+ /* raced with another isolation */
+ if (!folio_test_clear_lru(folio)) {
+ folio_put(folio);
+ return false;
+ }
+
+ /* see the comment on MAX_NR_TIERS */
+ if (!folio_test_referenced(folio))
+ set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0);
+
+ /* for shrink_folio_list() */
+ folio_clear_reclaim(folio);
+ folio_clear_referenced(folio);
+
+ success = lru_gen_del_folio(lruvec, folio, true);
+ VM_WARN_ON_ONCE_FOLIO(!success, folio);
+
+ return true;
+}
+
+static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
+ int type, int tier, struct list_head *list)
+{
+ int gen, zone;
+ enum vm_event_item item;
+ int sorted = 0;
+ int scanned = 0;
+ int isolated = 0;
+ int remaining = MAX_LRU_BATCH;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+
+ VM_WARN_ON_ONCE(!list_empty(list));
+
+ if (get_nr_gens(lruvec, type) == MIN_NR_GENS)
+ return 0;
+
+ gen = lru_gen_from_seq(lrugen->min_seq[type]);
+
+ for (zone = sc->reclaim_idx; zone >= 0; zone--) {
+ LIST_HEAD(moved);
+ int skipped = 0;
+ struct list_head *head = &lrugen->lists[gen][type][zone];
+
+ while (!list_empty(head)) {
+ struct folio *folio = lru_to_folio(head);
+ int delta = folio_nr_pages(folio);
+
+ VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
+
+ scanned += delta;
+
+ if (sort_folio(lruvec, folio, tier))
+ sorted += delta;
+ else if (isolate_folio(lruvec, folio, sc)) {
+ list_add(&folio->lru, list);
+ isolated += delta;
+ } else {
+ list_move(&folio->lru, &moved);
+ skipped += delta;
+ }
+
+ if (!--remaining || max(isolated, skipped) >= MIN_LRU_BATCH)
+ break;
+ }
+
+ if (skipped) {
+ list_splice(&moved, head);
+ __count_zid_vm_events(PGSCAN_SKIP, zone, skipped);
+ }
+
+ if (!remaining || isolated >= MIN_LRU_BATCH)
+ break;
+ }
+
+ item = current_is_kswapd() ? PGSCAN_KSWAPD : PGSCAN_DIRECT;
+ if (!cgroup_reclaim(sc)) {
+ __count_vm_events(item, isolated);
+ __count_vm_events(PGREFILL, sorted);
+ }
+ __count_memcg_events(memcg, item, isolated);
+ __count_memcg_events(memcg, PGREFILL, sorted);
+ __count_vm_events(PGSCAN_ANON + type, isolated);
+
+ /*
+ * There might not be eligible pages due to reclaim_idx, may_unmap and
+ * may_writepage. Check the remaining to prevent livelock if it's not
+ * making progress.
+ */
+ return isolated || !remaining ? scanned : 0;
+}
+
+static int get_tier_idx(struct lruvec *lruvec, int type)
+{
+ int tier;
+ struct ctrl_pos sp, pv;
+
+ /*
+ * To leave a margin for fluctuations, use a larger gain factor (1:2).
+ * This value is chosen because any other tier would have at least twice
+ * as many refaults as the first tier.
+ */
+ read_ctrl_pos(lruvec, type, 0, 1, &sp);
+ for (tier = 1; tier < MAX_NR_TIERS; tier++) {
+ read_ctrl_pos(lruvec, type, tier, 2, &pv);
+ if (!positive_ctrl_err(&sp, &pv))
+ break;
+ }
+
+ return tier - 1;
+}
+
+static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx)
+{
+ int type, tier;
+ struct ctrl_pos sp, pv;
+ int gain[ANON_AND_FILE] = { swappiness, 200 - swappiness };
+
+ /*
+ * Compare the first tier of anon with that of file to determine which
+ * type to scan. Also need to compare other tiers of the selected type
+ * with the first tier of the other type to determine the last tier (of
+ * the selected type) to evict.
+ */
+ read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp);
+ read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv);
+ type = positive_ctrl_err(&sp, &pv);
+
+ read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp);
+ for (tier = 1; tier < MAX_NR_TIERS; tier++) {
+ read_ctrl_pos(lruvec, type, tier, gain[type], &pv);
+ if (!positive_ctrl_err(&sp, &pv))
+ break;
+ }
+
+ *tier_idx = tier - 1;
+
+ return type;
+}
+
+static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
+ int *type_scanned, struct list_head *list)
+{
+ int i;
+ int type;
+ int scanned;
+ int tier = -1;
+ DEFINE_MIN_SEQ(lruvec);
+
+ /*
+ * Try to make the obvious choice first. When anon and file are both
+ * available from the same generation, interpret swappiness 1 as file
+ * first and 200 as anon first.
+ */
+ if (!swappiness)
+ type = LRU_GEN_FILE;
+ else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE])
+ type = LRU_GEN_ANON;
+ else if (swappiness == 1)
+ type = LRU_GEN_FILE;
+ else if (swappiness == 200)
+ type = LRU_GEN_ANON;
+ else
+ type = get_type_to_scan(lruvec, swappiness, &tier);
+
+ for (i = !swappiness; i < ANON_AND_FILE; i++) {
+ if (tier < 0)
+ tier = get_tier_idx(lruvec, type);
+
+ scanned = scan_folios(lruvec, sc, type, tier, list);
+ if (scanned)
+ break;
+
+ type = !type;
+ tier = -1;
+ }
+
+ *type_scanned = type;
+
+ return scanned;
+}
+
+static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness,
+ bool *need_swapping)
+{
+ int type;
+ int scanned;
+ int reclaimed;
+ LIST_HEAD(list);
+ struct folio *folio;
+ enum vm_event_item item;
+ struct reclaim_stat stat;
+ struct lru_gen_mm_walk *walk;
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ struct pglist_data *pgdat = lruvec_pgdat(lruvec);
+
+ spin_lock_irq(&lruvec->lru_lock);
+
+ scanned = isolate_folios(lruvec, sc, swappiness, &type, &list);
+
+ scanned += try_to_inc_min_seq(lruvec, swappiness);
+
+ if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS)
+ scanned = 0;
+
+ spin_unlock_irq(&lruvec->lru_lock);
+
+ if (list_empty(&list))
+ return scanned;
+
+ reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false);
+
+ list_for_each_entry(folio, &list, lru) {
+ /* restore LRU_REFS_FLAGS cleared by isolate_folio() */
+ if (folio_test_workingset(folio))
+ folio_set_referenced(folio);
+
+ /* don't add rejected pages to the oldest generation */
+ if (folio_test_reclaim(folio) &&
+ (folio_test_dirty(folio) || folio_test_writeback(folio)))
+ folio_clear_active(folio);
+ else
+ folio_set_active(folio);
+ }
+
+ spin_lock_irq(&lruvec->lru_lock);
+
+ move_folios_to_lru(lruvec, &list);
+
+ walk = current->reclaim_state->mm_walk;
+ if (walk && walk->batched)
+ reset_batch_size(lruvec, walk);
+
+ item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT;
+ if (!cgroup_reclaim(sc))
+ __count_vm_events(item, reclaimed);
+ __count_memcg_events(memcg, item, reclaimed);
+ __count_vm_events(PGSTEAL_ANON + type, reclaimed);
+
+ spin_unlock_irq(&lruvec->lru_lock);
+
+ mem_cgroup_uncharge_list(&list);
+ free_unref_page_list(&list);
+
+ sc->nr_reclaimed += reclaimed;
+
+ if (need_swapping && type == LRU_GEN_ANON)
+ *need_swapping = true;
+
+ return scanned;
+}
+
+/*
+ * For future optimizations:
+ * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
+ * reclaim.
+ */
+static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc,
+ bool can_swap, bool *need_aging)
+{
+ unsigned long nr_to_scan;
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ DEFINE_MAX_SEQ(lruvec);
+ DEFINE_MIN_SEQ(lruvec);
+
+ if (mem_cgroup_below_min(memcg) ||
+ (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim))
+ return 0;
+
+ *need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, can_swap, &nr_to_scan);
+ if (!*need_aging)
+ return nr_to_scan;
+
+ /* skip the aging path at the default priority */
+ if (sc->priority == DEF_PRIORITY)
+ goto done;
+
+ /* leave the work to lru_gen_age_node() */
+ if (current_is_kswapd())
+ return 0;
+
+ if (try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, false))
+ return nr_to_scan;
+done:
+ return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0;
+}
+
+static bool should_abort_scan(struct lruvec *lruvec, unsigned long seq,
+ struct scan_control *sc, bool need_swapping)
+{
+ int i;
+ DEFINE_MAX_SEQ(lruvec);
+
+ if (!current_is_kswapd()) {
+ /* age each memcg at most once to ensure fairness */
+ if (max_seq - seq > 1)
+ return true;
+
+ /* over-swapping can increase allocation latency */
+ if (sc->nr_reclaimed >= sc->nr_to_reclaim && need_swapping)
+ return true;
+
+ /* give this thread a chance to exit and free its memory */
+ if (fatal_signal_pending(current)) {
+ sc->nr_reclaimed += MIN_LRU_BATCH;
+ return true;
+ }
+
+ if (cgroup_reclaim(sc))
+ return false;
+ } else if (sc->nr_reclaimed - sc->last_reclaimed < sc->nr_to_reclaim)
+ return false;
+
+ /* keep scanning at low priorities to ensure fairness */
+ if (sc->priority > DEF_PRIORITY - 2)
+ return false;
+
+ /*
+ * A minimum amount of work was done under global memory pressure. For
+ * kswapd, it may be overshooting. For direct reclaim, the allocation
+ * may succeed if all suitable zones are somewhat safe. In either case,
+ * it's better to stop now, and restart later if necessary.
+ */
+ for (i = 0; i <= sc->reclaim_idx; i++) {
+ unsigned long wmark;
+ struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
+
+ if (!managed_zone(zone))
+ continue;
+
+ wmark = current_is_kswapd() ? high_wmark_pages(zone) : low_wmark_pages(zone);
+ if (wmark > zone_page_state(zone, NR_FREE_PAGES))
+ return false;
+ }
+
+ sc->nr_reclaimed += MIN_LRU_BATCH;
+
+ return true;
+}
+
+static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+{
+ struct blk_plug plug;
+ bool need_aging = false;
+ bool need_swapping = false;
+ unsigned long scanned = 0;
+ unsigned long reclaimed = sc->nr_reclaimed;
+ DEFINE_MAX_SEQ(lruvec);
+
+ lru_add_drain();
+
+ blk_start_plug(&plug);
+
+ set_mm_walk(lruvec_pgdat(lruvec));
+
+ while (true) {
+ int delta;
+ int swappiness;
+ unsigned long nr_to_scan;
+
+ if (sc->may_swap)
+ swappiness = get_swappiness(lruvec, sc);
+ else if (!cgroup_reclaim(sc) && get_swappiness(lruvec, sc))
+ swappiness = 1;
+ else
+ swappiness = 0;
+
+ nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness, &need_aging);
+ if (!nr_to_scan)
+ goto done;
+
+ delta = evict_folios(lruvec, sc, swappiness, &need_swapping);
+ if (!delta)
+ goto done;
+
+ scanned += delta;
+ if (scanned >= nr_to_scan)
+ break;
+
+ if (should_abort_scan(lruvec, max_seq, sc, need_swapping))
+ break;
+
+ cond_resched();
+ }
+
+ /* see the comment in lru_gen_age_node() */
+ if (sc->nr_reclaimed - reclaimed >= MIN_LRU_BATCH && !need_aging)
+ sc->memcgs_need_aging = false;
+done:
+ clear_mm_walk();
+
+ blk_finish_plug(&plug);
+}
+
+/******************************************************************************
+ * state change
+ ******************************************************************************/
+
+static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
+{
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ if (lrugen->enabled) {
+ enum lru_list lru;
+
+ for_each_evictable_lru(lru) {
+ if (!list_empty(&lruvec->lists[lru]))
+ return false;
+ }
+ } else {
+ int gen, type, zone;
+
+ for_each_gen_type_zone(gen, type, zone) {
+ if (!list_empty(&lrugen->lists[gen][type][zone]))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool fill_evictable(struct lruvec *lruvec)
+{
+ enum lru_list lru;
+ int remaining = MAX_LRU_BATCH;
+
+ for_each_evictable_lru(lru) {
+ int type = is_file_lru(lru);
+ bool active = is_active_lru(lru);
+ struct list_head *head = &lruvec->lists[lru];
+
+ while (!list_empty(head)) {
+ bool success;
+ struct folio *folio = lru_to_folio(head);
+
+ VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio) != active, folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio);
+
+ lruvec_del_folio(lruvec, folio);
+ success = lru_gen_add_folio(lruvec, folio, false);
+ VM_WARN_ON_ONCE(!success);
+
+ if (!--remaining)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool drain_evictable(struct lruvec *lruvec)
+{
+ int gen, type, zone;
+ int remaining = MAX_LRU_BATCH;
+
+ for_each_gen_type_zone(gen, type, zone) {
+ struct list_head *head = &lruvec->lrugen.lists[gen][type][zone];
+
+ while (!list_empty(head)) {
+ bool success;
+ struct folio *folio = lru_to_folio(head);
+
+ VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_is_file_lru(folio) != type, folio);
+ VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
+
+ success = lru_gen_del_folio(lruvec, folio, false);
+ VM_WARN_ON_ONCE(!success);
+ lruvec_add_folio(lruvec, folio);
+
+ if (!--remaining)
+ return false;
+ }
+ }
+
+ return true;
+}
+
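+/*
+ * A rough sketch of the state change below: toggling the core capability
+ * flips the LRU_GEN_CORE static key and then, for each memcg and node, moves
+ * folios between the classic active/inactive lists and the generation lists
+ * via fill_evictable()/drain_evictable(), dropping lru_lock and rescheduling
+ * every MAX_LRU_BATCH folios.
+ */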
+static void lru_gen_change_state(bool enabled)
+{
+ static DEFINE_MUTEX(state_mutex);
+
+ struct mem_cgroup *memcg;
+
+ cgroup_lock();
+ cpus_read_lock();
+ get_online_mems();
+ mutex_lock(&state_mutex);
+
+ if (enabled == lru_gen_enabled())
+ goto unlock;
+
+ if (enabled)
+ static_branch_enable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
+ else
+ static_branch_disable_cpuslocked(&lru_gen_caps[LRU_GEN_CORE]);
+
+ memcg = mem_cgroup_iter(NULL, NULL, NULL);
+ do {
+ int nid;
+
+ for_each_node(nid) {
+ struct lruvec *lruvec = get_lruvec(memcg, nid);
+
+ if (!lruvec)
+ continue;
+
+ spin_lock_irq(&lruvec->lru_lock);
+
+ VM_WARN_ON_ONCE(!seq_is_valid(lruvec));
+ VM_WARN_ON_ONCE(!state_is_valid(lruvec));
+
+ lruvec->lrugen.enabled = enabled;
+
+ while (!(enabled ? fill_evictable(lruvec) : drain_evictable(lruvec))) {
+ spin_unlock_irq(&lruvec->lru_lock);
+ cond_resched();
+ spin_lock_irq(&lruvec->lru_lock);
+ }
+
+ spin_unlock_irq(&lruvec->lru_lock);
+ }
+
+ cond_resched();
+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+unlock:
+ mutex_unlock(&state_mutex);
+ put_online_mems();
+ cpus_read_unlock();
+ cgroup_unlock();
+}
+
+/******************************************************************************
+ * sysfs interface
+ ******************************************************************************/
+
+static ssize_t show_min_ttl(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", jiffies_to_msecs(READ_ONCE(lru_gen_min_ttl)));
+}
+
+/* see Documentation/admin-guide/mm/multigen_lru.rst for details */
+static ssize_t store_min_ttl(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ unsigned int msecs;
+
+ if (kstrtouint(buf, 0, &msecs))
+ return -EINVAL;
+
+ WRITE_ONCE(lru_gen_min_ttl, msecs_to_jiffies(msecs));
+
+ return len;
+}
+
+static struct kobj_attribute lru_gen_min_ttl_attr = __ATTR(
+ min_ttl_ms, 0644, show_min_ttl, store_min_ttl
+);
+
+static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ unsigned int caps = 0;
+
+ if (get_cap(LRU_GEN_CORE))
+ caps |= BIT(LRU_GEN_CORE);
+
+ if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK))
+ caps |= BIT(LRU_GEN_MM_WALK);
+
+ if (IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) && get_cap(LRU_GEN_NONLEAF_YOUNG))
+ caps |= BIT(LRU_GEN_NONLEAF_YOUNG);
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n", caps);
+}
+
+/* see Documentation/admin-guide/mm/multigen_lru.rst for details */
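+/*
+ * Illustrative usage, assuming mm_kobj is exposed at /sys/kernel/mm as usual:
+ * writing "y" or "n" to /sys/kernel/mm/lru_gen/enabled sets or clears all
+ * capabilities, while a hexadecimal value such as 0x0001 selects individual
+ * LRU_GEN_* bits, matching the bitmap reported by show_enabled() above.
+ */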
+static ssize_t store_enabled(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t len)
+{
+ int i;
+ unsigned int caps;
+
+ if (tolower(*buf) == 'n')
+ caps = 0;
+ else if (tolower(*buf) == 'y')
+ caps = -1;
+ else if (kstrtouint(buf, 0, &caps))
+ return -EINVAL;
+
+ for (i = 0; i < NR_LRU_GEN_CAPS; i++) {
+ bool enabled = caps & BIT(i);
+
+ if (i == LRU_GEN_CORE)
+ lru_gen_change_state(enabled);
+ else if (enabled)
+ static_branch_enable(&lru_gen_caps[i]);
+ else
+ static_branch_disable(&lru_gen_caps[i]);
+ }
+
+ return len;
+}
+
+static struct kobj_attribute lru_gen_enabled_attr = __ATTR(
+ enabled, 0644, show_enabled, store_enabled
+);
+
+static struct attribute *lru_gen_attrs[] = {
+ &lru_gen_min_ttl_attr.attr,
+ &lru_gen_enabled_attr.attr,
+ NULL
+};
+
+static struct attribute_group lru_gen_attr_group = {
+ .name = "lru_gen",
+ .attrs = lru_gen_attrs,
+};
+
+/******************************************************************************
+ * debugfs interface
+ ******************************************************************************/
+
+static void *lru_gen_seq_start(struct seq_file *m, loff_t *pos)
+{
+ struct mem_cgroup *memcg;
+ loff_t nr_to_skip = *pos;
+
+ m->private = kvmalloc(PATH_MAX, GFP_KERNEL);
+ if (!m->private)
+ return ERR_PTR(-ENOMEM);
+
+ memcg = mem_cgroup_iter(NULL, NULL, NULL);
+ do {
+ int nid;
+
+ for_each_node_state(nid, N_MEMORY) {
+ if (!nr_to_skip--)
+ return get_lruvec(memcg, nid);
+ }
+ } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
+
+ return NULL;
+}
+
+static void lru_gen_seq_stop(struct seq_file *m, void *v)
+{
+ if (!IS_ERR_OR_NULL(v))
+ mem_cgroup_iter_break(NULL, lruvec_memcg(v));
+
+ kvfree(m->private);
+ m->private = NULL;
+}
+
+static void *lru_gen_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ int nid = lruvec_pgdat(v)->node_id;
+ struct mem_cgroup *memcg = lruvec_memcg(v);
+
+ ++*pos;
+
+ nid = next_memory_node(nid);
+ if (nid == MAX_NUMNODES) {
+ memcg = mem_cgroup_iter(NULL, memcg, NULL);
+ if (!memcg)
+ return NULL;
+
+ nid = first_memory_node;
+ }
+
+ return get_lruvec(memcg, nid);
+}
+
+static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec,
+ unsigned long max_seq, unsigned long *min_seq,
+ unsigned long seq)
+{
+ int i;
+ int type, tier;
+ int hist = lru_hist_from_seq(seq);
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ for (tier = 0; tier < MAX_NR_TIERS; tier++) {
+ seq_printf(m, " %10d", tier);
+ for (type = 0; type < ANON_AND_FILE; type++) {
+ const char *s = "   ";
+ unsigned long n[3] = {};
+
+ if (seq == max_seq) {
+ s = "RT ";
+ n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]);
+ n[1] = READ_ONCE(lrugen->avg_total[type][tier]);
+ } else if (seq == min_seq[type] || NR_HIST_GENS > 1) {
+ s = "rep";
+ n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]);
+ n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]);
+ if (tier)
+ n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]);
+ }
+
+ for (i = 0; i < 3; i++)
+ seq_printf(m, " %10lu%c", n[i], s[i]);
+ }
+ seq_putc(m, '\n');
+ }
+
+ seq_puts(m, " ");
+ for (i = 0; i < NR_MM_STATS; i++) {
+ const char *s = "      ";
+ unsigned long n = 0;
+
+ if (seq == max_seq && NR_HIST_GENS == 1) {
+ s = "LOYNFA";
+ n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
+ } else if (seq != max_seq && NR_HIST_GENS > 1) {
+ s = "loynfa";
+ n = READ_ONCE(lruvec->mm_state.stats[hist][i]);
+ }
+
+ seq_printf(m, " %10lu%c", n, s[i]);
+ }
+ seq_putc(m, '\n');
+}
+
+/* see Documentation/admin-guide/mm/multigen_lru.rst for details */
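+/*
+ * A rough sketch of the output produced below: each memcg/node pair starts
+ * with "memcg <id> <path>" and " node <id>", followed by one line per
+ * generation carrying its sequence number, age in milliseconds and anon/file
+ * page counts; the read-write file starts at the anon min_seq, whereas
+ * lru_gen_full walks all MAX_NR_GENS generations and also dumps the per-tier
+ * refault and mm-walk statistics via lru_gen_seq_show_full().
+ */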
+static int lru_gen_seq_show(struct seq_file *m, void *v)
+{
+ unsigned long seq;
+ bool full = !debugfs_real_fops(m->file)->write;
+ struct lruvec *lruvec = v;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+ int nid = lruvec_pgdat(lruvec)->node_id;
+ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+ DEFINE_MAX_SEQ(lruvec);
+ DEFINE_MIN_SEQ(lruvec);
+
+ if (nid == first_memory_node) {
+ const char *path = memcg ? m->private : "";
+
+#ifdef CONFIG_MEMCG
+ if (memcg)
+ cgroup_path(memcg->css.cgroup, m->private, PATH_MAX);
+#endif
+ seq_printf(m, "memcg %5hu %s\n", mem_cgroup_id(memcg), path);
+ }
+
+ seq_printf(m, " node %5d\n", nid);
+
+ if (!full)
+ seq = min_seq[LRU_GEN_ANON];
+ else if (max_seq >= MAX_NR_GENS)
+ seq = max_seq - MAX_NR_GENS + 1;
+ else
+ seq = 0;
+
+ for (; seq <= max_seq; seq++) {
+ int type, zone;
+ int gen = lru_gen_from_seq(seq);
+ unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]);
+
+ seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth));
+
+ for (type = 0; type < ANON_AND_FILE; type++) {
+ unsigned long size = 0;
+ char mark = full && seq < min_seq[type] ? 'x' : ' ';
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++)
+ size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+
+ seq_printf(m, " %10lu%c", size, mark);
+ }
+
+ seq_putc(m, '\n');
+
+ if (full)
+ lru_gen_seq_show_full(m, lruvec, max_seq, min_seq, seq);
+ }
+
+ return 0;
+}
+
+static const struct seq_operations lru_gen_seq_ops = {
+ .start = lru_gen_seq_start,
+ .stop = lru_gen_seq_stop,
+ .next = lru_gen_seq_next,
+ .show = lru_gen_seq_show,
+};
+
+static int run_aging(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
+ bool can_swap, bool force_scan)
+{
+ DEFINE_MAX_SEQ(lruvec);
+ DEFINE_MIN_SEQ(lruvec);
+
+ if (seq < max_seq)
+ return 0;
+
+ if (seq > max_seq)
+ return -EINVAL;
+
+ if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq)
+ return -ERANGE;
+
+ try_to_inc_max_seq(lruvec, max_seq, sc, can_swap, force_scan);
+
+ return 0;
+}
+
+static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc,
+ int swappiness, unsigned long nr_to_reclaim)
+{
+ DEFINE_MAX_SEQ(lruvec);
+
+ if (seq + MIN_NR_GENS > max_seq)
+ return -EINVAL;
+
+ sc->nr_reclaimed = 0;
+
+ while (!signal_pending(current)) {
+ DEFINE_MIN_SEQ(lruvec);
+
+ if (seq < min_seq[!swappiness])
+ return 0;
+
+ if (sc->nr_reclaimed >= nr_to_reclaim)
+ return 0;
+
+ if (!evict_folios(lruvec, sc, swappiness, NULL))
+ return 0;
+
+ cond_resched();
+ }
+
+ return -EINTR;
+}
+
+static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq,
+ struct scan_control *sc, int swappiness, unsigned long opt)
+{
+ struct lruvec *lruvec;
+ int err = -EINVAL;
+ struct mem_cgroup *memcg = NULL;
+
+ if (nid < 0 || nid >= MAX_NUMNODES || !node_state(nid, N_MEMORY))
+ return -EINVAL;
+
+ if (!mem_cgroup_disabled()) {
+ rcu_read_lock();
+ memcg = mem_cgroup_from_id(memcg_id);
+#ifdef CONFIG_MEMCG
+ if (memcg && !css_tryget(&memcg->css))
+ memcg = NULL;
+#endif
+ rcu_read_unlock();
+
+ if (!memcg)
+ return -EINVAL;
+ }
+
+ if (memcg_id != mem_cgroup_id(memcg))
+ goto done;
+
+ lruvec = get_lruvec(memcg, nid);
+
+ if (swappiness < 0)
+ swappiness = get_swappiness(lruvec, sc);
+ else if (swappiness > 200)
+ goto done;
+
+ switch (cmd) {
+ case '+':
+ err = run_aging(lruvec, seq, sc, swappiness, opt);
+ break;
+ case '-':
+ err = run_eviction(lruvec, seq, sc, swappiness, opt);
+ break;
+ }
+done:
+ mem_cgroup_put(memcg);
+
+ return err;
+}
+
+/* see Documentation/admin-guide/mm/multigen_lru.rst for details */
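+/*
+ * A rough sketch of the command format, derived from the sscanf() and
+ * run_cmd() below:
+ *   "+ memcg_id node_id max_gen_nr [swappiness [force_scan]]"    runs aging
+ *   "- memcg_id node_id min_gen_nr [swappiness [nr_to_reclaim]]" runs eviction
+ * Commands are separated by ',', ';' or newlines and are written to the
+ * debugfs file created below, typically found at /sys/kernel/debug/lru_gen.
+ */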
+static ssize_t lru_gen_seq_write(struct file *file, const char __user *src,
+ size_t len, loff_t *pos)
+{
+ void *buf;
+ char *cur, *next;
+ unsigned int flags;
+ struct blk_plug plug;
+ int err = -EINVAL;
+ struct scan_control sc = {
+ .may_writepage = true,
+ .may_unmap = true,
+ .may_swap = true,
+ .reclaim_idx = MAX_NR_ZONES - 1,
+ .gfp_mask = GFP_KERNEL,
+ };
+
+ buf = kvmalloc(len + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, src, len)) {
+ kvfree(buf);
+ return -EFAULT;
+ }
+
+ set_task_reclaim_state(current, &sc.reclaim_state);
+ flags = memalloc_noreclaim_save();
+ blk_start_plug(&plug);
+ if (!set_mm_walk(NULL)) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ next = buf;
+ next[len] = '\0';
+
+ while ((cur = strsep(&next, ",;\n"))) {
+ int n;
+ int end;
+ char cmd;
+ unsigned int memcg_id;
+ unsigned int nid;
+ unsigned long seq;
+ unsigned int swappiness = -1;
+ unsigned long opt = -1;
+
+ cur = skip_spaces(cur);
+ if (!*cur)
+ continue;
+
+ n = sscanf(cur, "%c %u %u %lu %n %u %n %lu %n", &cmd, &memcg_id, &nid,
+ &seq, &end, &swappiness, &end, &opt, &end);
+ if (n < 4 || cur[end]) {
+ err = -EINVAL;
+ break;
+ }
+
+ err = run_cmd(cmd, memcg_id, nid, seq, &sc, swappiness, opt);
+ if (err)
+ break;
+ }
+done:
+ clear_mm_walk();
+ blk_finish_plug(&plug);
+ memalloc_noreclaim_restore(flags);
+ set_task_reclaim_state(current, NULL);
+
+ kvfree(buf);
+
+ return err ? : len;
+}
+
+static int lru_gen_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &lru_gen_seq_ops);
+}
+
+static const struct file_operations lru_gen_rw_fops = {
+ .open = lru_gen_seq_open,
+ .read = seq_read,
+ .write = lru_gen_seq_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct file_operations lru_gen_ro_fops = {
+ .open = lru_gen_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/******************************************************************************
+ * initialization
+ ******************************************************************************/
+
+void lru_gen_init_lruvec(struct lruvec *lruvec)
+{
+ int i;
+ int gen, type, zone;
+ struct lru_gen_struct *lrugen = &lruvec->lrugen;
+
+ lrugen->max_seq = MIN_NR_GENS + 1;
+ lrugen->enabled = lru_gen_enabled();
+
+ for (i = 0; i <= MIN_NR_GENS + 1; i++)
+ lrugen->timestamps[i] = jiffies;
+
+ for_each_gen_type_zone(gen, type, zone)
+ INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
+
+ lruvec->mm_state.seq = MIN_NR_GENS;
+ init_waitqueue_head(&lruvec->mm_state.wait);
+}
+
+#ifdef CONFIG_MEMCG
+void lru_gen_init_memcg(struct mem_cgroup *memcg)
+{
+ INIT_LIST_HEAD(&memcg->mm_list.fifo);
+ spin_lock_init(&memcg->mm_list.lock);
+}
+
+void lru_gen_exit_memcg(struct mem_cgroup *memcg)
+{
+ int i;
+ int nid;
+
+ for_each_node(nid) {
+ struct lruvec *lruvec = get_lruvec(memcg, nid);
+
+ VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0,
+ sizeof(lruvec->lrugen.nr_pages)));
+
+ for (i = 0; i < NR_BLOOM_FILTERS; i++) {
+ bitmap_free(lruvec->mm_state.filters[i]);
+ lruvec->mm_state.filters[i] = NULL;
+ }
+ }
+}
+#endif
+
+static int __init init_lru_gen(void)
+{
+ BUILD_BUG_ON(MIN_NR_GENS + 1 >= MAX_NR_GENS);
+ BUILD_BUG_ON(BIT(LRU_GEN_WIDTH) <= MAX_NR_GENS);
+
+ if (sysfs_create_group(mm_kobj, &lru_gen_attr_group))
+ pr_err("lru_gen: failed to create sysfs group\n");
+
+ debugfs_create_file("lru_gen", 0644, NULL, NULL, &lru_gen_rw_fops);
+ debugfs_create_file("lru_gen_full", 0444, NULL, NULL, &lru_gen_ro_fops);
+
+ return 0;
+}
+late_initcall(init_lru_gen);
+
+#else /* !CONFIG_LRU_GEN */
+
+static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
+{
+}
+
+static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
+{
+}
+
+#endif /* CONFIG_LRU_GEN */
+
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
unsigned long nr[NR_LRU_LISTS];
@@ -2958,6 +5847,11 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
struct blk_plug plug;
bool scan_adjusted;
+ if (lru_gen_enabled()) {
+ lru_gen_shrink_lruvec(lruvec, sc);
+ return;
+ }
+
get_scan_count(lruvec, sc, nr);
/* Record the original scan target for proportional adjustments later */
@@ -3197,109 +6091,16 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
unsigned long nr_reclaimed, nr_scanned;
struct lruvec *target_lruvec;
bool reclaimable = false;
- unsigned long file;
target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
again:
- /*
- * Flush the memory cgroup stats, so that we read accurate per-memcg
- * lruvec stats for heuristics.
- */
- mem_cgroup_flush_stats();
-
memset(&sc->nr, 0, sizeof(sc->nr));
nr_reclaimed = sc->nr_reclaimed;
nr_scanned = sc->nr_scanned;
- /*
- * Determine the scan balance between anon and file LRUs.
- */
- spin_lock_irq(&target_lruvec->lru_lock);
- sc->anon_cost = target_lruvec->anon_cost;
- sc->file_cost = target_lruvec->file_cost;
- spin_unlock_irq(&target_lruvec->lru_lock);
-
- /*
- * Target desirable inactive:active list ratios for the anon
- * and file LRU lists.
- */
- if (!sc->force_deactivate) {
- unsigned long refaults;
-
- refaults = lruvec_page_state(target_lruvec,
- WORKINGSET_ACTIVATE_ANON);
- if (refaults != target_lruvec->refaults[0] ||
- inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
- sc->may_deactivate |= DEACTIVATE_ANON;
- else
- sc->may_deactivate &= ~DEACTIVATE_ANON;
-
- /*
- * When refaults are being observed, it means a new
- * workingset is being established. Deactivate to get
- * rid of any stale active pages quickly.
- */
- refaults = lruvec_page_state(target_lruvec,
- WORKINGSET_ACTIVATE_FILE);
- if (refaults != target_lruvec->refaults[1] ||
- inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
- sc->may_deactivate |= DEACTIVATE_FILE;
- else
- sc->may_deactivate &= ~DEACTIVATE_FILE;
- } else
- sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
-
- /*
- * If we have plenty of inactive file pages that aren't
- * thrashing, try to reclaim those first before touching
- * anonymous pages.
- */
- file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
- if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
- sc->cache_trim_mode = 1;
- else
- sc->cache_trim_mode = 0;
-
- /*
- * Prevent the reclaimer from falling into the cache trap: as
- * cache pages start out inactive, every cache fault will tip
- * the scan balance towards the file LRU. And as the file LRU
- * shrinks, so does the window for rotation from references.
- * This means we have a runaway feedback loop where a tiny
- * thrashing file LRU becomes infinitely more attractive than
- * anon pages. Try to detect this based on file LRU size.
- */
- if (!cgroup_reclaim(sc)) {
- unsigned long total_high_wmark = 0;
- unsigned long free, anon;
- int z;
-
- free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
- file = node_page_state(pgdat, NR_ACTIVE_FILE) +
- node_page_state(pgdat, NR_INACTIVE_FILE);
-
- for (z = 0; z < MAX_NR_ZONES; z++) {
- struct zone *zone = &pgdat->node_zones[z];
- if (!managed_zone(zone))
- continue;
-
- total_high_wmark += high_wmark_pages(zone);
- }
-
- /*
- * Consider anon: if that's low too, this isn't a
- * runaway file reclaim problem, but rather just
- * extreme pressure. Reclaim as per usual then.
- */
- anon = node_page_state(pgdat, NR_INACTIVE_ANON);
-
- sc->file_is_tiny =
- file + free <= total_high_wmark &&
- !(sc->may_deactivate & DEACTIVATE_ANON) &&
- anon >> sc->priority;
- }
+ prepare_scan_count(pgdat, sc);
shrink_node_memcgs(pgdat, sc);
@@ -3557,11 +6358,14 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
struct lruvec *target_lruvec;
unsigned long refaults;
+ if (lru_gen_enabled())
+ return;
+
target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
- target_lruvec->refaults[0] = refaults;
+ target_lruvec->refaults[WORKINGSET_ANON] = refaults;
refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
- target_lruvec->refaults[1] = refaults;
+ target_lruvec->refaults[WORKINGSET_FILE] = refaults;
}
/*
@@ -3923,12 +6727,16 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
}
#endif
-static void age_active_anon(struct pglist_data *pgdat,
- struct scan_control *sc)
+static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
{
struct mem_cgroup *memcg;
struct lruvec *lruvec;
+ if (lru_gen_enabled()) {
+ lru_gen_age_node(pgdat, sc);
+ return;
+ }
+
if (!can_age_anon_pages(pgdat, sc))
return;
@@ -4248,12 +7056,11 @@ restart:
sc.may_swap = !nr_boost_reclaim;
/*
- * Do some background aging of the anon list, to give
- * pages a chance to be referenced before reclaiming. All
- * pages are rotated regardless of classzone as this is
- * about consistent aging.
+ * Do some background aging, to give pages a chance to be
+ * referenced before reclaiming. All pages are rotated
+ * regardless of classzone as this is about consistent aging.
*/
- age_active_anon(pgdat, &sc);
+ kswapd_age_node(pgdat, &sc);
/*
* If we're getting trouble reclaiming, start doing writepage
@@ -4643,16 +7450,17 @@ void kswapd_run(int nid)
{
pg_data_t *pgdat = NODE_DATA(nid);
- if (pgdat->kswapd)
- return;
-
- pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
- if (IS_ERR(pgdat->kswapd)) {
- /* failure at boot is fatal */
- BUG_ON(system_state < SYSTEM_RUNNING);
- pr_err("Failed to start kswapd on node %d\n", nid);
- pgdat->kswapd = NULL;
+ pgdat_kswapd_lock(pgdat);
+ if (!pgdat->kswapd) {
+ pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
+ if (IS_ERR(pgdat->kswapd)) {
+ /* failure at boot is fatal */
+ BUG_ON(system_state < SYSTEM_RUNNING);
+ pr_err("Failed to start kswapd on node %d\n", nid);
+ pgdat->kswapd = NULL;
+ }
}
+ pgdat_kswapd_unlock(pgdat);
}
/*
@@ -4661,12 +7469,16 @@ void kswapd_run(int nid)
*/
void kswapd_stop(int nid)
{
- struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
+ pg_data_t *pgdat = NODE_DATA(nid);
+ struct task_struct *kswapd;
+ pgdat_kswapd_lock(pgdat);
+ kswapd = pgdat->kswapd;
if (kswapd) {
kthread_stop(kswapd);
- NODE_DATA(nid)->kswapd = NULL;
+ pgdat->kswapd = NULL;
}
+ pgdat_kswapd_unlock(pgdat);
}
static int __init kswapd_init(void)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 30686e6b4145..b2371d745e00 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -28,7 +28,6 @@
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>
-#include <linux/migrate.h>
#include "internal.h"
@@ -1241,6 +1240,7 @@ const char * const vmstat_text[] = {
#endif
#ifdef CONFIG_NUMA_BALANCING
"pgpromote_success",
+ "pgpromote_candidate",
#endif
/* enum writeback_stat_item counters */
@@ -1378,10 +1378,6 @@ const char * const vmstat_text[] = {
"nr_tlb_local_flush_one",
#endif /* CONFIG_DEBUG_TLBFLUSH */
-#ifdef CONFIG_DEBUG_VM_VMACACHE
- "vmacache_find_calls",
- "vmacache_find_hits",
-#endif
#ifdef CONFIG_SWAP
"swap_ra",
"swap_ra_hit",
@@ -2056,7 +2052,6 @@ static int vmstat_cpu_online(unsigned int cpu)
if (!node_state(cpu_to_node(cpu), N_CPU)) {
node_set_state(cpu_to_node(cpu), N_CPU);
- set_migration_target_nodes();
}
return 0;
@@ -2081,7 +2076,6 @@ static int vmstat_cpu_dead(unsigned int cpu)
return 0;
node_clear_state(node, N_CPU);
- set_migration_target_nodes();
return 0;
}
@@ -2114,7 +2108,6 @@ void __init init_mm_internals(void)
start_shepherd_timer();
#endif
- migrate_on_reclaim_init();
#ifdef CONFIG_PROC_FS
proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
diff --git a/mm/workingset.c b/mm/workingset.c
index a5e84862fc86..ae7e984b23c6 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -187,7 +187,6 @@ static unsigned int bucket_order __read_mostly;
static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,
bool workingset)
{
- eviction >>= bucket_order;
eviction &= EVICTION_MASK;
eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid;
eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
@@ -212,10 +211,107 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
*memcgidp = memcgid;
*pgdat = NODE_DATA(nid);
- *evictionp = entry << bucket_order;
+ *evictionp = entry;
*workingsetp = workingset;
}
+#ifdef CONFIG_LRU_GEN
+
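+/*
+ * A rough sketch of the scheme below: the shadow entry packs the evicted
+ * folio's min_seq plus its reference count as the token; on refault,
+ * lru_gen_refault() only updates the tier counters when the unpacked token
+ * still matches the current min_seq, i.e. min_seq has not advanced past the
+ * evicted generation.
+ */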
+static void *lru_gen_eviction(struct folio *folio)
+{
+ int hist;
+ unsigned long token;
+ unsigned long min_seq;
+ struct lruvec *lruvec;
+ struct lru_gen_struct *lrugen;
+ int type = folio_is_file_lru(folio);
+ int delta = folio_nr_pages(folio);
+ int refs = folio_lru_refs(folio);
+ int tier = lru_tier_from_refs(refs);
+ struct mem_cgroup *memcg = folio_memcg(folio);
+ struct pglist_data *pgdat = folio_pgdat(folio);
+
+ BUILD_BUG_ON(LRU_GEN_WIDTH + LRU_REFS_WIDTH > BITS_PER_LONG - EVICTION_SHIFT);
+
+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ lrugen = &lruvec->lrugen;
+ min_seq = READ_ONCE(lrugen->min_seq[type]);
+ token = (min_seq << LRU_REFS_WIDTH) | max(refs - 1, 0);
+
+ hist = lru_hist_from_seq(min_seq);
+ atomic_long_add(delta, &lrugen->evicted[hist][type][tier]);
+
+ return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs);
+}
+
+static void lru_gen_refault(struct folio *folio, void *shadow)
+{
+ int hist, tier, refs;
+ int memcg_id;
+ bool workingset;
+ unsigned long token;
+ unsigned long min_seq;
+ struct lruvec *lruvec;
+ struct lru_gen_struct *lrugen;
+ struct mem_cgroup *memcg;
+ struct pglist_data *pgdat;
+ int type = folio_is_file_lru(folio);
+ int delta = folio_nr_pages(folio);
+
+ unpack_shadow(shadow, &memcg_id, &pgdat, &token, &workingset);
+
+ if (pgdat != folio_pgdat(folio))
+ return;
+
+ rcu_read_lock();
+
+ memcg = folio_memcg_rcu(folio);
+ if (memcg_id != mem_cgroup_id(memcg))
+ goto unlock;
+
+ lruvec = mem_cgroup_lruvec(memcg, pgdat);
+ lrugen = &lruvec->lrugen;
+
+ min_seq = READ_ONCE(lrugen->min_seq[type]);
+ if ((token >> LRU_REFS_WIDTH) != (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH)))
+ goto unlock;
+
+ hist = lru_hist_from_seq(min_seq);
+ /* see the comment in folio_lru_refs() */
+ refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset;
+ tier = lru_tier_from_refs(refs);
+
+ atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]);
+ mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta);
+
+ /*
+ * Count the following two cases as stalls:
+ * 1. For pages accessed through page tables, hotter pages pushed out
+ * hot pages which refaulted immediately.
+ * 2. For pages accessed multiple times through file descriptors,
+ * the number of accesses might have been out of range.
+ */
+ if (lru_gen_in_fault() || refs == BIT(LRU_REFS_WIDTH)) {
+ folio_set_workingset(folio);
+ mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta);
+ }
+unlock:
+ rcu_read_unlock();
+}
+
+#else /* !CONFIG_LRU_GEN */
+
+static void *lru_gen_eviction(struct folio *folio)
+{
+ return NULL;
+}
+
+static void lru_gen_refault(struct folio *folio, void *shadow)
+{
+}
+
+#endif /* CONFIG_LRU_GEN */
+
/**
* workingset_age_nonresident - age non-resident entries as LRU ages
* @lruvec: the lruvec that was aged
@@ -264,10 +360,14 @@ void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg)
VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+ if (lru_gen_enabled())
+ return lru_gen_eviction(folio);
+
lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
/* XXX: target_memcg can be NULL, go through lruvec */
memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
eviction = atomic_long_read(&lruvec->nonresident_age);
+ eviction >>= bucket_order;
workingset_age_nonresident(lruvec, folio_nr_pages(folio));
return pack_shadow(memcgid, pgdat, eviction,
folio_test_workingset(folio));
@@ -298,7 +398,13 @@ void workingset_refault(struct folio *folio, void *shadow)
int memcgid;
long nr;
+ if (lru_gen_enabled()) {
+ lru_gen_refault(folio, shadow);
+ return;
+ }
+
unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset);
+ eviction <<= bucket_order;
rcu_read_lock();
/*
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 907c9b1e1e61..525758713a55 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -472,12 +472,12 @@ static inline struct page *get_first_page(struct zspage *zspage)
return first_page;
}
-static inline int get_first_obj_offset(struct page *page)
+static inline unsigned int get_first_obj_offset(struct page *page)
{
return page->page_type;
}
-static inline void set_first_obj_offset(struct page *page, int offset)
+static inline void set_first_obj_offset(struct page *page, unsigned int offset)
{
page->page_type = offset;
}
@@ -1555,6 +1555,13 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
d_off += size;
d_size -= size;
+ /*
+ * Calling kunmap_atomic(d_addr) is necessary. kunmap_atomic()
+ * calls must occur in reverse order of calls to kmap_atomic().
+ * So, to call kunmap_atomic(s_addr) we should first call
+ * kunmap_atomic(d_addr). For more details see
+ * Documentation/mm/highmem.rst.
+ */
if (s_off >= PAGE_SIZE) {
kunmap_atomic(d_addr);
kunmap_atomic(s_addr);
@@ -1585,7 +1592,7 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
static unsigned long find_alloced_obj(struct size_class *class,
struct page *page, int *obj_idx)
{
- int offset = 0;
+ unsigned int offset;
int index = *obj_idx;
unsigned long handle = 0;
void *addr = kmap_atomic(page);
@@ -1839,7 +1846,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
struct zspage *zspage;
struct page *dummy;
void *s_addr, *d_addr, *addr;
- int offset;
+ unsigned int offset;
unsigned long handle;
unsigned long old_obj, new_obj;
unsigned int obj_idx;
@@ -2103,8 +2110,6 @@ unsigned long zs_compact(struct zs_pool *pool)
for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
class = pool->size_class[i];
- if (!class)
- continue;
if (class->index != i)
continue;
pages_freed += __zs_compact(pool, class);
@@ -2149,8 +2154,6 @@ static unsigned long zs_shrinker_count(struct shrinker *shrinker,
for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
class = pool->size_class[i];
- if (!class)
- continue;
if (class->index != i)
continue;
@@ -2308,9 +2311,6 @@ void zs_destroy_pool(struct zs_pool *pool)
int fg;
struct size_class *class = pool->size_class[i];
- if (!class)
- continue;
-
if (class->index != i)
continue;
diff --git a/mm/zswap.c b/mm/zswap.c
index 104835b379ec..2d48fd59cc7a 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1026,7 +1026,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
SetPageReclaim(page);
/* start writeback */
- __swap_writepage(page, &wbc, end_swap_bio_write);
+ __swap_writepage(page, &wbc);
put_page(page);
zswap_written_back_pages++;
diff --git a/scripts/Makefile.kmsan b/scripts/Makefile.kmsan
new file mode 100644
index 000000000000..b5b0aa61322e
--- /dev/null
+++ b/scripts/Makefile.kmsan
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+kmsan-cflags := -fsanitize=kernel-memory
+
+ifdef CONFIG_KMSAN_CHECK_PARAM_RETVAL
+kmsan-cflags += -fsanitize-memory-param-retval
+endif
+
+export CFLAGS_KMSAN := $(kmsan-cflags)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 09d08f93dcd3..3aa384cec76b 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -164,6 +164,15 @@ _c_flags += $(if $(patsubst n%,, \
endif
endif
+ifeq ($(CONFIG_KMSAN),y)
+_c_flags += $(if $(patsubst n%,, \
+ $(KMSAN_SANITIZE_$(basetarget).o)$(KMSAN_SANITIZE)y), \
+ $(CFLAGS_KMSAN))
+_c_flags += $(if $(patsubst n%,, \
+ $(KMSAN_ENABLE_CHECKS_$(basetarget).o)$(KMSAN_ENABLE_CHECKS)y), \
+ , -mllvm -msan-disable-checks=1)
+endif
+
ifeq ($(CONFIG_UBSAN),y)
_c_flags += $(if $(patsubst n%,, \
$(UBSAN_SANITIZE_$(basetarget).o)$(UBSAN_SANITIZE)$(CONFIG_UBSAN_SANITIZE_ALL)), \
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index 995bc42003e6..d766b7d0ffd1 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -112,6 +112,7 @@ choice
config INIT_STACK_ALL_PATTERN
bool "pattern-init everything (strongest)"
depends on CC_HAS_AUTO_VAR_INIT_PATTERN
+ depends on !KMSAN
help
Initializes everything on the stack (including padding)
with a specific debug value. This is intended to eliminate
@@ -130,6 +131,7 @@ choice
config INIT_STACK_ALL_ZERO
bool "zero-init everything (strongest and safest)"
depends on CC_HAS_AUTO_VAR_INIT_ZERO
+ depends on !KMSAN
help
Initializes everything on the stack (including padding)
with a zero value. This is intended to eliminate all
@@ -224,6 +226,7 @@ config STACKLEAK_RUNTIME_DISABLE
config INIT_ON_ALLOC_DEFAULT_ON
bool "Enable heap memory zeroing on allocation by default"
+ depends on !KMSAN
help
This has the effect of setting "init_on_alloc=1" on the kernel
command line. This can be disabled with "init_on_alloc=0".
@@ -236,6 +239,7 @@ config INIT_ON_ALLOC_DEFAULT_ON
config INIT_ON_FREE_DEFAULT_ON
bool "Enable heap memory zeroing on free by default"
+ depends on !KMSAN
help
This has the effect of setting "init_on_free=1" on the kernel
command line. This can be disabled with "init_on_free=0".
diff --git a/tools/include/asm-generic/hugetlb_encode.h b/tools/include/asm-generic/hugetlb_encode.h
index 4f3d5aaa11f5..de687009bfe5 100644
--- a/tools/include/asm-generic/hugetlb_encode.h
+++ b/tools/include/asm-generic/hugetlb_encode.h
@@ -20,18 +20,18 @@
#define HUGETLB_FLAG_ENCODE_SHIFT 26
#define HUGETLB_FLAG_ENCODE_MASK 0x3f
-#define HUGETLB_FLAG_ENCODE_16KB (14 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_64KB (16 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_512KB (19 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_32MB (25 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_512MB (29 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16KB (14U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_64KB (16U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512KB (19U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1MB (20U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2MB (21U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_8MB (23U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16MB (24U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB (25U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_256MB (28U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB (29U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1GB (30U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2GB (31U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16GB (34U << HUGETLB_FLAG_ENCODE_SHIFT)
#endif /* _ASM_GENERIC_HUGETLB_ENCODE_H_ */
diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index 0616409513eb..311759ea25e9 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -41,4 +41,8 @@ struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
unsigned int align, unsigned int flags,
void (*ctor)(void *));
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+ void **list);
+
#endif /* _TOOLS_SLAB_H */
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index 6c1aa92a92e4..6ce1f1ceb432 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -77,6 +77,8 @@
#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
+#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 715f35a8cc00..43ec14c29a60 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1062,6 +1062,26 @@ static const char *uaccess_safe_builtin[] = {
"__sanitizer_cov_trace_cmp4",
"__sanitizer_cov_trace_cmp8",
"__sanitizer_cov_trace_switch",
+ /* KMSAN */
+ "kmsan_copy_to_user",
+ "kmsan_report",
+ "kmsan_unpoison_entry_regs",
+ "kmsan_unpoison_memory",
+ "__msan_chain_origin",
+ "__msan_get_context_state",
+ "__msan_instrument_asm_store",
+ "__msan_metadata_ptr_for_load_1",
+ "__msan_metadata_ptr_for_load_2",
+ "__msan_metadata_ptr_for_load_4",
+ "__msan_metadata_ptr_for_load_8",
+ "__msan_metadata_ptr_for_load_n",
+ "__msan_metadata_ptr_for_store_1",
+ "__msan_metadata_ptr_for_store_2",
+ "__msan_metadata_ptr_for_store_4",
+ "__msan_metadata_ptr_for_store_8",
+ "__msan_metadata_ptr_for_store_n",
+ "__msan_poison_alloca",
+ "__msan_warning",
/* UBSAN */
"ubsan_type_mismatch_common",
"__ubsan_handle_type_mismatch",
diff --git a/tools/testing/memblock/linux/mmzone.h b/tools/testing/memblock/linux/mmzone.h
index 7c2eb5c9bb54..e65f89b12f1c 100644
--- a/tools/testing/memblock/linux/mmzone.h
+++ b/tools/testing/memblock/linux/mmzone.h
@@ -22,6 +22,8 @@ enum zone_type {
#define pageblock_order (MAX_ORDER - 1)
#define pageblock_nr_pages BIT(pageblock_order)
+#define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)
+#define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)
struct zone {
atomic_long_t managed_pages;
diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore
index d971516401e6..c901d96dd013 100644
--- a/tools/testing/radix-tree/.gitignore
+++ b/tools/testing/radix-tree/.gitignore
@@ -6,3 +6,5 @@ main
multiorder
radix-tree.c
xarray
+maple
+ma_xa_benchmark
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index c4ea4fbb0bfc..89d613e0505b 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -4,9 +4,9 @@ CFLAGS += -I. -I../../include -g -Og -Wall -D_LGPL_SOURCE -fsanitize=address \
-fsanitize=undefined
LDFLAGS += -fsanitize=address -fsanitize=undefined
LDLIBS+= -lpthread -lurcu
-TARGETS = main idr-test multiorder xarray
+TARGETS = main idr-test multiorder xarray maple
CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o \
- slab.o
+ slab.o maple.o
OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
regression4.o tag_check.o multiorder.o idr-test.o iteration_check.o \
iteration_check_2.o benchmark.o
@@ -29,6 +29,8 @@ idr-test: idr-test.o $(CORE_OFILES)
xarray: $(CORE_OFILES)
+maple: $(CORE_OFILES)
+
multiorder: multiorder.o $(CORE_OFILES)
clean:
@@ -40,6 +42,7 @@ $(OFILES): Makefile *.h */*.h generated/map-shift.h \
../../include/linux/*.h \
../../include/asm/*.h \
../../../include/linux/xarray.h \
+ ../../../include/linux/maple_tree.h \
../../../include/linux/radix-tree.h \
../../../include/linux/idr.h
@@ -51,6 +54,8 @@ idr.c: ../../../lib/idr.c
xarray.o: ../../../lib/xarray.c ../../../lib/test_xarray.c
+maple.o: ../../../lib/maple_tree.c ../../../lib/test_maple_tree.c
+
generated/map-shift.h:
@if ! grep -qws $(SHIFT) generated/map-shift.h; then \
echo "#define XA_CHUNK_SHIFT $(SHIFT)" > \
diff --git a/tools/testing/radix-tree/generated/autoconf.h b/tools/testing/radix-tree/generated/autoconf.h
index 2218b3cc184e..e7da80350236 100644
--- a/tools/testing/radix-tree/generated/autoconf.h
+++ b/tools/testing/radix-tree/generated/autoconf.h
@@ -1 +1,2 @@
#define CONFIG_XARRAY_MULTI 1
+#define CONFIG_64BIT 1
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index d5c1bcba86fe..2048d12c31df 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -23,15 +23,47 @@ struct kmem_cache {
int nr_objs;
void *objs;
void (*ctor)(void *);
+ unsigned int non_kernel;
+ unsigned long nr_allocated;
+ unsigned long nr_tallocated;
};
+void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
+{
+ cachep->non_kernel = val;
+}
+
+unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
+{
+ return cachep->size * cachep->nr_allocated;
+}
+
+unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
+{
+ return cachep->nr_allocated;
+}
+
+unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
+{
+ return cachep->nr_tallocated;
+}
+
+void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
+{
+ cachep->nr_tallocated = 0;
+}
+
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
int gfp)
{
void *p;
- if (!(gfp & __GFP_DIRECT_RECLAIM))
- return NULL;
+ if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+ if (!cachep->non_kernel)
+ return NULL;
+
+ cachep->non_kernel--;
+ }
pthread_mutex_lock(&cachep->lock);
if (cachep->nr_objs) {
@@ -53,19 +85,21 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
memset(p, 0, cachep->size);
}
+ uatomic_inc(&cachep->nr_allocated);
uatomic_inc(&nr_allocated);
+ uatomic_inc(&cachep->nr_tallocated);
if (kmalloc_verbose)
printf("Allocating %p from slab\n", p);
return p;
}
-void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
assert(objp);
uatomic_dec(&nr_allocated);
+ uatomic_dec(&cachep->nr_allocated);
if (kmalloc_verbose)
printf("Freeing %p to slab\n", objp);
- pthread_mutex_lock(&cachep->lock);
if (cachep->nr_objs > 10 || cachep->align) {
memset(objp, POISON_FREE, cachep->size);
free(objp);
@@ -75,9 +109,80 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
node->parent = cachep->objs;
cachep->objs = node;
}
+}
+
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+{
+ pthread_mutex_lock(&cachep->lock);
+ kmem_cache_free_locked(cachep, objp);
pthread_mutex_unlock(&cachep->lock);
}
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
+{
+ if (kmalloc_verbose)
+ pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);
+
+ pthread_mutex_lock(&cachep->lock);
+ for (int i = 0; i < size; i++)
+ kmem_cache_free_locked(cachep, list[i]);
+ pthread_mutex_unlock(&cachep->lock);
+}
+
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+ void **p)
+{
+ size_t i;
+
+ if (kmalloc_verbose)
+ pr_debug("Bulk alloc %lu\n", size);
+
+ if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+ if (cachep->non_kernel < size)
+ return 0;
+
+ cachep->non_kernel -= size;
+ }
+
+ pthread_mutex_lock(&cachep->lock);
+ if (cachep->nr_objs >= size) {
+ struct radix_tree_node *node;
+
+ for (i = 0; i < size; i++) {
+ node = cachep->objs;
+ cachep->nr_objs--;
+ cachep->objs = node->parent;
+ p[i] = node;
+ node->parent = NULL;
+ }
+ pthread_mutex_unlock(&cachep->lock);
+ } else {
+ pthread_mutex_unlock(&cachep->lock);
+ for (i = 0; i < size; i++) {
+ if (cachep->align) {
+ posix_memalign(&p[i], cachep->align,
+ cachep->size * size);
+ } else {
+ p[i] = malloc(cachep->size * size);
+ }
+ if (cachep->ctor)
+ cachep->ctor(p[i]);
+ else if (gfp & __GFP_ZERO)
+ memset(p[i], 0, cachep->size);
+ }
+ }
+
+ for (i = 0; i < size; i++) {
+ uatomic_inc(&nr_allocated);
+ uatomic_inc(&cachep->nr_allocated);
+ uatomic_inc(&cachep->nr_tallocated);
+ if (kmalloc_verbose)
+ printf("Allocating %p from slab\n", p[i]);
+ }
+
+ return size;
+}
+
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
unsigned int flags, void (*ctor)(void *))
@@ -88,7 +193,54 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
ret->size = size;
ret->align = align;
ret->nr_objs = 0;
+ ret->nr_allocated = 0;
+ ret->nr_tallocated = 0;
ret->objs = NULL;
ret->ctor = ctor;
+ ret->non_kernel = 0;
return ret;
}
+
+/*
+ * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
+ */
+void test_kmem_cache_bulk(void)
+{
+ int i;
+ void *list[12];
+ static struct kmem_cache *test_cache, *test_cache2;
+
+ /*
+ * Testing the bulk allocators without aligned kmem_cache to force the
+ * bulk alloc/free to reuse objects kept on the cache's internal free list.
+ */
+ test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);
+
+ for (i = 0; i < 5; i++)
+ list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+ for (i = 0; i < 5; i++)
+ kmem_cache_free(test_cache, list[i]);
+ assert(test_cache->nr_objs == 5);
+
+ kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
+ kmem_cache_free_bulk(test_cache, 5, list);
+
+ for (i = 0; i < 12 ; i++)
+ list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+ for (i = 0; i < 12; i++)
+ kmem_cache_free(test_cache, list[i]);
+
+ /* The last free will not be kept around */
+ assert(test_cache->nr_objs == 11);
+
+ /* Aligned caches will immediately free */
+ test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);
+
+ kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
+ kmem_cache_free_bulk(test_cache2, 10, list);
+ assert(!test_cache2->nr_objs);
+}
diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h
index 39867fd80c8f..c5c9d05f29da 100644
--- a/tools/testing/radix-tree/linux/kernel.h
+++ b/tools/testing/radix-tree/linux/kernel.h
@@ -14,6 +14,7 @@
#include "../../../include/linux/kconfig.h"
#define printk printf
+#define pr_err printk
#define pr_info printk
#define pr_debug printk
#define pr_cont printk
diff --git a/tools/testing/radix-tree/linux/lockdep.h b/tools/testing/radix-tree/linux/lockdep.h
index 016cff473cfc..62473ab57f99 100644
--- a/tools/testing/radix-tree/linux/lockdep.h
+++ b/tools/testing/radix-tree/linux/lockdep.h
@@ -11,4 +11,6 @@ static inline void lockdep_set_class(spinlock_t *lock,
struct lock_class_key *key)
{
}
+
+extern int lockdep_is_held(const void *);
#endif /* _LINUX_LOCKDEP_H */
diff --git a/tools/testing/radix-tree/linux/maple_tree.h b/tools/testing/radix-tree/linux/maple_tree.h
new file mode 100644
index 000000000000..7d8d1f445b89
--- /dev/null
+++ b/tools/testing/radix-tree/linux/maple_tree.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#define atomic_t int32_t
+#include "../../../../include/linux/maple_tree.h"
+#define atomic_inc(x) uatomic_inc(x)
+#define atomic_read(x) uatomic_read(x)
+#define atomic_set(x, y) do {} while (0)
+#define U8_MAX UCHAR_MAX
diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c
new file mode 100644
index 000000000000..35082671928a
--- /dev/null
+++ b/tools/testing/radix-tree/maple.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * maple_tree.c: Userspace shim for maple tree test-suite
+ * Copyright (c) 2018 Liam R. Howlett <Liam.Howlett@Oracle.com>
+ */
+
+#define CONFIG_DEBUG_MAPLE_TREE
+#define CONFIG_MAPLE_SEARCH
+#include "test.h"
+
+#define module_init(x)
+#define module_exit(x)
+#define MODULE_AUTHOR(x)
+#define MODULE_LICENSE(x)
+#define dump_stack() assert(0)
+
+#include "../../../lib/maple_tree.c"
+#undef CONFIG_DEBUG_MAPLE_TREE
+#include "../../../lib/test_maple_tree.c"
+
+void farmer_tests(void)
+{
+ struct maple_node *node;
+ DEFINE_MTREE(tree);
+
+ mt_dump(&tree);
+
+ tree.ma_root = xa_mk_value(0);
+ mt_dump(&tree);
+
+ node = mt_alloc_one(GFP_KERNEL);
+ node->parent = (void *)((unsigned long)(&tree) | 1);
+ node->slot[0] = xa_mk_value(0);
+ node->slot[1] = xa_mk_value(1);
+ node->mr64.pivot[0] = 0;
+ node->mr64.pivot[1] = 1;
+ node->mr64.pivot[2] = 0;
+ tree.ma_root = mt_mk_node(node, maple_leaf_64);
+ mt_dump(&tree);
+
+ ma_free_rcu(node);
+}
+
+void maple_tree_tests(void)
+{
+ farmer_tests();
+ maple_tree_seed();
+ maple_tree_harvest();
+}
+
+int __weak main(void)
+{
+ maple_tree_init();
+ maple_tree_tests();
+ rcu_barrier();
+ if (nr_allocated)
+ printf("nr_allocated = %d\n", nr_allocated);
+ return 0;
+}
diff --git a/tools/testing/radix-tree/trace/events/maple_tree.h b/tools/testing/radix-tree/trace/events/maple_tree.h
new file mode 100644
index 000000000000..97d0e1ddcf08
--- /dev/null
+++ b/tools/testing/radix-tree/trace/events/maple_tree.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#define trace_ma_op(a, b) do {} while (0)
+#define trace_ma_read(a, b) do {} while (0)
+#define trace_ma_write(a, b, c, d) do {} while (0)
diff --git a/tools/testing/selftests/cgroup/config b/tools/testing/selftests/cgroup/config
index 84fe884fad86..97d549ee894f 100644
--- a/tools/testing/selftests/cgroup/config
+++ b/tools/testing/selftests/cgroup/config
@@ -4,5 +4,4 @@ CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_SCHED=y
CONFIG_MEMCG=y
CONFIG_MEMCG_KMEM=y
-CONFIG_MEMCG_SWAP=y
CONFIG_PAGE_COUNTER=y
diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile
index 0470c5f3e690..a1fa2eff8192 100644
--- a/tools/testing/selftests/damon/Makefile
+++ b/tools/testing/selftests/damon/Makefile
@@ -6,6 +6,7 @@ TEST_GEN_FILES += huge_count_read_write
TEST_FILES = _chk_dependency.sh _debugfs_common.sh
TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh
TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh
+TEST_PROGS += debugfs_duplicate_context_creation.sh
TEST_PROGS += sysfs.sh
include ../lib.mk
diff --git a/tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh b/tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh
new file mode 100644
index 000000000000..4a76e37ef16b
--- /dev/null
+++ b/tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+source _debugfs_common.sh
+
+# Test duplicated context creation
+# ================================
+
+if ! echo foo > "$DBGFS/mk_contexts"
+then
+ echo "context creation failed"
+ exit 1
+fi
+
+if echo foo > "$DBGFS/mk_contexts"
+then
+ echo "duplicate context creation success"
+ exit 1
+fi
+
+if ! echo foo > "$DBGFS/rm_contexts"
+then
+ echo "context deletion failed"
+ exit 1
+fi
+
+exit 0
diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore
index 31e5eea2a9b9..7b9dc2426f18 100644
--- a/tools/testing/selftests/vm/.gitignore
+++ b/tools/testing/selftests/vm/.gitignore
@@ -30,7 +30,6 @@ map_fixed_noreplace
write_to_hugetlbfs
hmm-tests
memfd_secret
-local_config.*
soft-dirty
split_huge_page_test
ksm_tests
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index d516b8c38eed..163c2fde3cb3 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,9 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for vm selftests
-LOCAL_HDRS += $(selfdir)/vm/local_config.h $(top_srcdir)/mm/gup_test.h
-
-include local_config.mk
+LOCAL_HDRS += $(top_srcdir)/mm/gup_test.h
uname_M := $(shell uname -m 2>/dev/null || echo not)
MACHINE ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/ppc64/')
@@ -97,9 +95,11 @@ TEST_FILES += va_128TBswitch.sh
include ../lib.mk
+$(OUTPUT)/khugepaged: vm_util.c
$(OUTPUT)/madv_populate: vm_util.c
$(OUTPUT)/soft-dirty: vm_util.c
$(OUTPUT)/split_huge_page_test: vm_util.c
+$(OUTPUT)/userfaultfd: vm_util.c
ifeq ($(MACHINE),x86_64)
BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
@@ -152,23 +152,6 @@ endif
$(OUTPUT)/mlock-random-test $(OUTPUT)/memfd_secret: LDLIBS += -lcap
-# HMM_EXTRA_LIBS may get set in local_config.mk, or it may be left empty.
-$(OUTPUT)/hmm-tests: LDLIBS += $(HMM_EXTRA_LIBS)
-
$(OUTPUT)/ksm_tests: LDLIBS += -lnuma
$(OUTPUT)/migration: LDLIBS += -lnuma
-
-local_config.mk local_config.h: check_config.sh
- /bin/sh ./check_config.sh $(CC)
-
-EXTRA_CLEAN += local_config.mk local_config.h
-
-ifeq ($(HMM_EXTRA_LIBS),)
-all: warn_missing_hugelibs
-
-warn_missing_hugelibs:
- @echo ; \
- echo "Warning: missing libhugetlbfs support. Some HMM tests will be skipped." ; \
- echo
-endif
diff --git a/tools/testing/selftests/vm/check_config.sh b/tools/testing/selftests/vm/check_config.sh
deleted file mode 100644
index 079c8a40b85d..000000000000
--- a/tools/testing/selftests/vm/check_config.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-#
-# Probe for libraries and create header files to record the results. Both C
-# header files and Makefile include fragments are created.
-
-OUTPUT_H_FILE=local_config.h
-OUTPUT_MKFILE=local_config.mk
-
-# libhugetlbfs
-tmpname=$(mktemp)
-tmpfile_c=${tmpname}.c
-tmpfile_o=${tmpname}.o
-
-echo "#include <sys/types.h>" > $tmpfile_c
-echo "#include <hugetlbfs.h>" >> $tmpfile_c
-echo "int func(void) { return 0; }" >> $tmpfile_c
-
-CC=${1:?"Usage: $0 <compiler> # example compiler: gcc"}
-$CC -c $tmpfile_c -o $tmpfile_o >/dev/null 2>&1
-
-if [ -f $tmpfile_o ]; then
- echo "#define LOCAL_CONFIG_HAVE_LIBHUGETLBFS 1" > $OUTPUT_H_FILE
- echo "HMM_EXTRA_LIBS = -lhugetlbfs" > $OUTPUT_MKFILE
-else
- echo "// No libhugetlbfs support found" > $OUTPUT_H_FILE
- echo "# No libhugetlbfs support found, so:" > $OUTPUT_MKFILE
- echo "HMM_EXTRA_LIBS = " >> $OUTPUT_MKFILE
-fi
-
-rm ${tmpname}.*
diff --git a/tools/testing/selftests/vm/hmm-tests.c b/tools/testing/selftests/vm/hmm-tests.c
index 98b949c279be..7d722265dcd7 100644
--- a/tools/testing/selftests/vm/hmm-tests.c
+++ b/tools/testing/selftests/vm/hmm-tests.c
@@ -26,10 +26,6 @@
#include <sys/mman.h>
#include <sys/ioctl.h>
-#include "./local_config.h"
-#ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
-#include <hugetlbfs.h>
-#endif
/*
* This is a private UAPI to the kernel test module so it isn't exported
@@ -733,7 +729,54 @@ TEST_F(hmm, anon_write_huge)
hmm_buffer_free(buffer);
}
-#ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
+/*
+ * Read numeric data from raw and tagged kernel status files. Used to read
+ * /proc and /sys data (without a tag) and /proc/meminfo (with a tag).
+ */
+static long file_read_ulong(char *file, const char *tag)
+{
+ int fd;
+ char buf[2048];
+ int len;
+ char *p, *q;
+ long val;
+
+ fd = open(file, O_RDONLY);
+ if (fd < 0) {
+ /* Error opening the file */
+ return -1;
+ }
+
+ len = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (len < 0) {
+ /* Error in reading the file */
+ return -1;
+ }
+ if (len == sizeof(buf)) {
+ /* Error file is too large */
+ return -1;
+ }
+ buf[len] = '\0';
+
+ /* Search for a tag if provided */
+ if (tag) {
+ p = strstr(buf, tag);
+ if (!p)
+ return -1; /* looks like the line we want isn't there */
+ p += strlen(tag);
+ } else
+ p = buf;
+
+ val = strtol(p, &q, 0);
+ if (*q != ' ') {
+ /* Error parsing the file */
+ return -1;
+ }
+
+ return val;
+}
+
/*
* Write huge TLBFS page.
*/
@@ -742,29 +785,27 @@ TEST_F(hmm, anon_write_hugetlbfs)
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
+ unsigned long default_hsize;
unsigned long i;
int *ptr;
int ret;
- long pagesizes[4];
- int n, idx;
-
- /* Skip test if we can't allocate a hugetlbfs page. */
- n = gethugepagesizes(pagesizes, 4);
- if (n <= 0)
+ default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
+ if (default_hsize < 0 || default_hsize*1024 < default_hsize)
SKIP(return, "Huge page size could not be determined");
- for (idx = 0; --n > 0; ) {
- if (pagesizes[n] < pagesizes[idx])
- idx = n;
- }
- size = ALIGN(TWOMEG, pagesizes[idx]);
+ default_hsize = default_hsize*1024; /* KB to B */
+
+ size = ALIGN(TWOMEG, default_hsize);
npages = size >> self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
- buffer->ptr = get_hugepage_region(size, GHR_STRICT);
- if (buffer->ptr == NULL) {
+ buffer->ptr = mmap(NULL, size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
+ -1, 0);
+ if (buffer->ptr == MAP_FAILED) {
free(buffer);
SKIP(return, "Huge page could not be allocated");
}
@@ -788,11 +829,10 @@ TEST_F(hmm, anon_write_hugetlbfs)
for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
ASSERT_EQ(ptr[i], i);
- free_hugepage_region(buffer->ptr);
+ munmap(buffer->ptr, buffer->size);
buffer->ptr = NULL;
hmm_buffer_free(buffer);
}
-#endif /* LOCAL_CONFIG_HAVE_LIBHUGETLBFS */
/*
* Read mmap'ed file memory.
@@ -1467,7 +1507,6 @@ TEST_F(hmm2, snapshot)
hmm_buffer_free(buffer);
}
-#ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
/*
* Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
* should be mapped by a large page table entry.
@@ -1477,30 +1516,30 @@ TEST_F(hmm, compound)
struct hmm_buffer *buffer;
unsigned long npages;
unsigned long size;
+ unsigned long default_hsize;
int *ptr;
unsigned char *m;
int ret;
- long pagesizes[4];
- int n, idx;
unsigned long i;
/* Skip test if we can't allocate a hugetlbfs page. */
- n = gethugepagesizes(pagesizes, 4);
- if (n <= 0)
- return;
- for (idx = 0; --n > 0; ) {
- if (pagesizes[n] < pagesizes[idx])
- idx = n;
- }
- size = ALIGN(TWOMEG, pagesizes[idx]);
+ default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:");
+ if (default_hsize < 0 || default_hsize*1024 < default_hsize)
+ SKIP(return, "Huge page size could not be determined");
+ default_hsize = default_hsize*1024; /* KB to B */
+
+ size = ALIGN(TWOMEG, default_hsize);
npages = size >> self->page_shift;
buffer = malloc(sizeof(*buffer));
ASSERT_NE(buffer, NULL);
- buffer->ptr = get_hugepage_region(size, GHR_STRICT);
- if (buffer->ptr == NULL) {
+ buffer->ptr = mmap(NULL, size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
+ -1, 0);
+ if (buffer->ptr == MAP_FAILED) {
free(buffer);
return;
}
@@ -1539,11 +1578,10 @@ TEST_F(hmm, compound)
ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
HMM_DMIRROR_PROT_PMD);
- free_hugepage_region(buffer->ptr);
+ munmap(buffer->ptr, buffer->size);
buffer->ptr = NULL;
hmm_buffer_free(buffer);
}
-#endif /* LOCAL_CONFIG_HAVE_LIBHUGETLBFS */
/*
* Test two devices reading the same memory (double mapped).
diff --git a/tools/testing/selftests/vm/khugepaged.c b/tools/testing/selftests/vm/khugepaged.c
index 155120b67a16..64126c8cd561 100644
--- a/tools/testing/selftests/vm/khugepaged.c
+++ b/tools/testing/selftests/vm/khugepaged.c
@@ -1,6 +1,9 @@
#define _GNU_SOURCE
+#include <ctype.h>
+#include <errno.h>
#include <fcntl.h>
#include <limits.h>
+#include <dirent.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
@@ -10,10 +13,24 @@
#include <sys/mman.h>
#include <sys/wait.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+#include <sys/vfs.h>
+
+#include "linux/magic.h"
+
+#include "vm_util.h"
#ifndef MADV_PAGEOUT
#define MADV_PAGEOUT 21
#endif
+#ifndef MADV_POPULATE_READ
+#define MADV_POPULATE_READ 22
+#endif
+#ifndef MADV_COLLAPSE
+#define MADV_COLLAPSE 25
+#endif
#define BASE_ADDR ((void *)(1UL << 30))
static unsigned long hpage_pmd_size;
@@ -22,6 +39,47 @@ static int hpage_pmd_nr;
#define THP_SYSFS "/sys/kernel/mm/transparent_hugepage/"
#define PID_SMAPS "/proc/self/smaps"
+#define TEST_FILE "collapse_test_file"
+
+#define MAX_LINE_LENGTH 500
+
+enum vma_type {
+ VMA_ANON,
+ VMA_FILE,
+ VMA_SHMEM,
+};
+
+struct mem_ops {
+ void *(*setup_area)(int nr_hpages);
+ void (*cleanup_area)(void *p, unsigned long size);
+ void (*fault)(void *p, unsigned long start, unsigned long end);
+ bool (*check_huge)(void *addr, int nr_hpages);
+ const char *name;
+};
+
+static struct mem_ops *file_ops;
+static struct mem_ops *anon_ops;
+static struct mem_ops *shmem_ops;
+
+struct collapse_context {
+ void (*collapse)(const char *msg, char *p, int nr_hpages,
+ struct mem_ops *ops, bool expect);
+ bool enforce_pte_scan_limits;
+ const char *name;
+};
+
+static struct collapse_context *khugepaged_context;
+static struct collapse_context *madvise_context;
+
+struct file_info {
+ const char *dir;
+ char path[PATH_MAX];
+ enum vma_type type;
+ int fd;
+ char dev_queue_read_ahead_path[PATH_MAX];
+};
+
+static struct file_info finfo;
enum thp_enabled {
THP_ALWAYS,
@@ -88,18 +146,7 @@ struct settings {
enum shmem_enabled shmem_enabled;
bool use_zero_page;
struct khugepaged_settings khugepaged;
-};
-
-static struct settings default_settings = {
- .thp_enabled = THP_MADVISE,
- .thp_defrag = THP_DEFRAG_ALWAYS,
- .shmem_enabled = SHMEM_NEVER,
- .use_zero_page = 0,
- .khugepaged = {
- .defrag = 1,
- .alloc_sleep_millisecs = 10,
- .scan_sleep_millisecs = 10,
- },
+ unsigned long read_ahead_kb;
};
static struct settings saved_settings;
@@ -118,6 +165,11 @@ static void fail(const char *msg)
exit_status++;
}
+static void skip(const char *msg)
+{
+ printf(" \e[33m%s\e[0m\n", msg);
+}
+
static int read_file(const char *path, char *buf, size_t buflen)
{
int fd;
@@ -145,13 +197,19 @@ static int write_file(const char *path, const char *buf, size_t buflen)
ssize_t numwritten;
fd = open(path, O_WRONLY);
- if (fd == -1)
+ if (fd == -1) {
+ printf("open(%s)\n", path);
+ exit(EXIT_FAILURE);
return 0;
+ }
numwritten = write(fd, buf, buflen - 1);
close(fd);
- if (numwritten < 1)
+ if (numwritten < 1) {
+ printf("write(%s)\n", buf);
+ exit(EXIT_FAILURE);
return 0;
+ }
return (unsigned int) numwritten;
}
@@ -218,20 +276,11 @@ static void write_string(const char *name, const char *val)
}
}
-static const unsigned long read_num(const char *name)
+static const unsigned long _read_num(const char *path)
{
- char path[PATH_MAX];
char buf[21];
- int ret;
-
- ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
- if (ret >= PATH_MAX) {
- printf("%s: Pathname is too long\n", __func__);
- exit(EXIT_FAILURE);
- }
- ret = read_file(path, buf, sizeof(buf));
- if (ret < 0) {
+ if (read_file(path, buf, sizeof(buf)) < 0) {
perror("read_file(read_num)");
exit(EXIT_FAILURE);
}
@@ -239,10 +288,9 @@ static const unsigned long read_num(const char *name)
return strtoul(buf, NULL, 10);
}
-static void write_num(const char *name, unsigned long num)
+static const unsigned long read_num(const char *name)
{
char path[PATH_MAX];
- char buf[21];
int ret;
ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
@@ -250,6 +298,12 @@ static void write_num(const char *name, unsigned long num)
printf("%s: Pathname is too long\n", __func__);
exit(EXIT_FAILURE);
}
+ return _read_num(path);
+}
+
+static void _write_num(const char *path, unsigned long num)
+{
+ char buf[21];
sprintf(buf, "%ld", num);
if (!write_file(path, buf, strlen(buf) + 1)) {
@@ -258,6 +312,19 @@ static void write_num(const char *name, unsigned long num)
}
}
+static void write_num(const char *name, unsigned long num)
+{
+ char path[PATH_MAX];
+ int ret;
+
+ ret = snprintf(path, PATH_MAX, THP_SYSFS "%s", name);
+ if (ret >= PATH_MAX) {
+ printf("%s: Pathname is too long\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+ _write_num(path, num);
+}
+
static void write_settings(struct settings *settings)
{
struct khugepaged_settings *khugepaged = &settings->khugepaged;
@@ -277,6 +344,43 @@ static void write_settings(struct settings *settings)
write_num("khugepaged/max_ptes_swap", khugepaged->max_ptes_swap);
write_num("khugepaged/max_ptes_shared", khugepaged->max_ptes_shared);
write_num("khugepaged/pages_to_scan", khugepaged->pages_to_scan);
+
+ if (file_ops && finfo.type == VMA_FILE)
+ _write_num(finfo.dev_queue_read_ahead_path,
+ settings->read_ahead_kb);
+}
+
+#define MAX_SETTINGS_DEPTH 4
+static struct settings settings_stack[MAX_SETTINGS_DEPTH];
+static int settings_index;
+
+static struct settings *current_settings(void)
+{
+ if (!settings_index) {
+ printf("Fail: No settings set");
+ exit(EXIT_FAILURE);
+ }
+ return settings_stack + settings_index - 1;
+}
+
+static void push_settings(struct settings *settings)
+{
+ if (settings_index >= MAX_SETTINGS_DEPTH) {
+ printf("Fail: Settings stack exceeded");
+ exit(EXIT_FAILURE);
+ }
+ settings_stack[settings_index++] = *settings;
+ write_settings(current_settings());
+}
+
+static void pop_settings(void)
+{
+ if (settings_index <= 0) {
+ printf("Fail: Settings stack empty");
+ exit(EXIT_FAILURE);
+ }
+ --settings_index;
+ write_settings(current_settings());
}
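A minimal usage sketch of the settings stack introduced above, matching how the collapse helpers later in this file drive it:

	/*
	 * Illustrative push/pop pattern:
	 *
	 *	struct settings s = *current_settings();
	 *	s.thp_enabled = THP_NEVER;	// tweak only what the test needs
	 *	push_settings(&s);		// copy onto the stack, apply via write_settings()
	 *	// ... run the test ...
	 *	pop_settings();			// re-apply the previous settings level
	 */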
static void restore_settings(int sig)
@@ -314,6 +418,10 @@ static void save_settings(void)
.max_ptes_shared = read_num("khugepaged/max_ptes_shared"),
.pages_to_scan = read_num("khugepaged/pages_to_scan"),
};
+ if (file_ops && finfo.type == VMA_FILE)
+ saved_settings.read_ahead_kb =
+ _read_num(finfo.dev_queue_read_ahead_path);
+
success("OK");
signal(SIGTERM, restore_settings);
@@ -322,72 +430,90 @@ static void save_settings(void)
signal(SIGQUIT, restore_settings);
}
-static void adjust_settings(void)
+static void get_finfo(const char *dir)
{
+ struct stat path_stat;
+ struct statfs fs;
+ char buf[1 << 10];
+ char path[PATH_MAX];
+ char *str, *end;
- printf("Adjust settings...");
- write_settings(&default_settings);
- success("OK");
-}
-
-#define MAX_LINE_LENGTH 500
-
-static bool check_for_pattern(FILE *fp, char *pattern, char *buf)
-{
- while (fgets(buf, MAX_LINE_LENGTH, fp) != NULL) {
- if (!strncmp(buf, pattern, strlen(pattern)))
- return true;
+ finfo.dir = dir;
+ stat(finfo.dir, &path_stat);
+ if (!S_ISDIR(path_stat.st_mode)) {
+ printf("%s: Not a directory (%s)\n", __func__, finfo.dir);
+ exit(EXIT_FAILURE);
}
- return false;
-}
-
-static bool check_huge(void *addr)
-{
- bool thp = false;
- int ret;
- FILE *fp;
- char buffer[MAX_LINE_LENGTH];
- char addr_pattern[MAX_LINE_LENGTH];
-
- ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-",
- (unsigned long) addr);
- if (ret >= MAX_LINE_LENGTH) {
- printf("%s: Pattern is too long\n", __func__);
+ if (snprintf(finfo.path, sizeof(finfo.path), "%s/" TEST_FILE,
+ finfo.dir) >= sizeof(finfo.path)) {
+ printf("%s: Pathname is too long\n", __func__);
exit(EXIT_FAILURE);
}
-
-
- fp = fopen(PID_SMAPS, "r");
- if (!fp) {
- printf("%s: Failed to open file %s\n", __func__, PID_SMAPS);
+ if (statfs(finfo.dir, &fs)) {
+ perror("statfs()");
exit(EXIT_FAILURE);
}
- if (!check_for_pattern(fp, addr_pattern, buffer))
- goto err_out;
-
- ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "AnonHugePages:%10ld kB",
- hpage_pmd_size >> 10);
- if (ret >= MAX_LINE_LENGTH) {
- printf("%s: Pattern is too long\n", __func__);
+ finfo.type = fs.f_type == TMPFS_MAGIC ? VMA_SHMEM : VMA_FILE;
+ if (finfo.type == VMA_SHMEM)
+ return;
+
+ /* Find owning device's queue/read_ahead_kb control */
+ if (snprintf(path, sizeof(path), "/sys/dev/block/%d:%d/uevent",
+ major(path_stat.st_dev), minor(path_stat.st_dev))
+ >= sizeof(path)) {
+ printf("%s: Pathname is too long\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+ if (read_file(path, buf, sizeof(buf)) < 0) {
+ perror("read_file(read_num)");
+ exit(EXIT_FAILURE);
+ }
+ if (strstr(buf, "DEVTYPE=disk")) {
+ /* Found it */
+ if (snprintf(finfo.dev_queue_read_ahead_path,
+ sizeof(finfo.dev_queue_read_ahead_path),
+ "/sys/dev/block/%d:%d/queue/read_ahead_kb",
+ major(path_stat.st_dev), minor(path_stat.st_dev))
+ >= sizeof(finfo.dev_queue_read_ahead_path)) {
+ printf("%s: Pathname is too long\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+ return;
+ }
+ if (!strstr(buf, "DEVTYPE=partition")) {
+ printf("%s: Unknown device type: %s\n", __func__, path);
exit(EXIT_FAILURE);
}
/*
- * Fetch the AnonHugePages: in the same block and check whether it got
- * the expected number of hugeepages next.
+	 * Partition of a block device - need to find the actual device.
+	 * Use the naming convention that devnameN is a partition of
+	 * device devname.
*/
- if (!check_for_pattern(fp, "AnonHugePages:", buffer))
- goto err_out;
-
- if (strncmp(buffer, addr_pattern, strlen(addr_pattern)))
- goto err_out;
-
- thp = true;
-err_out:
- fclose(fp);
- return thp;
+ str = strstr(buf, "DEVNAME=");
+ if (!str) {
+		printf("%s: Could not read: %s\n", __func__, path);
+ exit(EXIT_FAILURE);
+ }
+ str += 8;
+ end = str;
+ while (*end) {
+ if (isdigit(*end)) {
+ *end = '\0';
+ if (snprintf(finfo.dev_queue_read_ahead_path,
+ sizeof(finfo.dev_queue_read_ahead_path),
+ "/sys/block/%s/queue/read_ahead_kb",
+ str) >= sizeof(finfo.dev_queue_read_ahead_path)) {
+ printf("%s: Pathname is too long\n", __func__);
+ exit(EXIT_FAILURE);
+ }
+ return;
+ }
+ ++end;
+ }
+ printf("%s: Could not read: %s\n", __func__, path);
+ exit(EXIT_FAILURE);
}
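To make the sysfs walk above concrete, a hedged example of the uevent contents get_finfo() inspects; the device numbers and names are illustrative, not taken from the patch:

	/*
	 * /sys/dev/block/<major>:<minor>/uevent for a partition typically contains:
	 *
	 *	MAJOR=8
	 *	MINOR=1
	 *	DEVNAME=sda1
	 *	DEVTYPE=partition
	 *
	 * so the code above truncates DEVNAME at its first digit ("sda1" -> "sda")
	 * and settles on /sys/block/sda/queue/read_ahead_kb as the read-ahead knob;
	 * for DEVTYPE=disk it uses /sys/dev/block/<major>:<minor>/queue/read_ahead_kb
	 * directly.
	 */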
-
static bool check_swap(void *addr, unsigned long size)
{
bool swap = false;
@@ -409,7 +535,7 @@ static bool check_swap(void *addr, unsigned long size)
printf("%s: Failed to open file %s\n", __func__, PID_SMAPS);
exit(EXIT_FAILURE);
}
- if (!check_for_pattern(fp, addr_pattern, buffer))
+ if (!check_for_pattern(fp, addr_pattern, buffer, sizeof(buffer)))
goto err_out;
ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "Swap:%19ld kB",
@@ -422,7 +548,7 @@ static bool check_swap(void *addr, unsigned long size)
* Fetch the Swap: in the same block and check whether it got
	 * the expected number of hugepages next.
*/
- if (!check_for_pattern(fp, "Swap:", buffer))
+ if (!check_for_pattern(fp, "Swap:", buffer, sizeof(buffer)))
goto err_out;
if (strncmp(buffer, addr_pattern, strlen(addr_pattern)))
@@ -434,12 +560,12 @@ err_out:
return swap;
}
-static void *alloc_mapping(void)
+static void *alloc_mapping(int nr)
{
void *p;
- p = mmap(BASE_ADDR, hpage_pmd_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ p = mmap(BASE_ADDR, nr * hpage_pmd_size, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
if (p != BASE_ADDR) {
printf("Failed to allocate VMA at %p\n", BASE_ADDR);
exit(EXIT_FAILURE);
@@ -456,6 +582,60 @@ static void fill_memory(int *p, unsigned long start, unsigned long end)
p[i * page_size / sizeof(*p)] = i + 0xdead0000;
}
+/*
+ * MADV_COLLAPSE is a best-effort request and may fail if an internal
+ * resource is temporarily unavailable, in which case it will set errno to
+ * EAGAIN. In such a case, immediately reattempt the operation one more
+ * time.
+ */
+static int madvise_collapse_retry(void *p, unsigned long size)
+{
+ bool retry = true;
+ int ret;
+
+retry:
+ ret = madvise(p, size, MADV_COLLAPSE);
+ if (ret && errno == EAGAIN && retry) {
+ retry = false;
+ goto retry;
+ }
+ return ret;
+}
+
+/*
+ * Returns pmd-mapped hugepage in VMA marked VM_HUGEPAGE, filled with
+ * validate_memory()'able contents.
+ */
+static void *alloc_hpage(struct mem_ops *ops)
+{
+ void *p = ops->setup_area(1);
+
+ ops->fault(p, 0, hpage_pmd_size);
+
+ /*
+ * VMA should be neither VM_HUGEPAGE nor VM_NOHUGEPAGE.
+ * The latter is ineligible for collapse by MADV_COLLAPSE
+ * while the former might cause MADV_COLLAPSE to race with
+	 * khugepaged on a low-load system (like a test machine), which
+ * would cause MADV_COLLAPSE to fail with EAGAIN.
+ */
+ printf("Allocate huge page...");
+ if (madvise_collapse_retry(p, hpage_pmd_size)) {
+ perror("madvise(MADV_COLLAPSE)");
+ exit(EXIT_FAILURE);
+ }
+ if (!ops->check_huge(p, 1)) {
+ perror("madvise(MADV_COLLAPSE)");
+ exit(EXIT_FAILURE);
+ }
+ if (madvise(p, hpage_pmd_size, MADV_HUGEPAGE)) {
+ perror("madvise(MADV_HUGEPAGE)");
+ exit(EXIT_FAILURE);
+ }
+ success("OK");
+ return p;
+}
+
static void validate_memory(int *p, unsigned long start, unsigned long end)
{
int i;
@@ -469,26 +649,216 @@ static void validate_memory(int *p, unsigned long start, unsigned long end)
}
}
+static void *anon_setup_area(int nr_hpages)
+{
+ return alloc_mapping(nr_hpages);
+}
+
+static void anon_cleanup_area(void *p, unsigned long size)
+{
+ munmap(p, size);
+}
+
+static void anon_fault(void *p, unsigned long start, unsigned long end)
+{
+ fill_memory(p, start, end);
+}
+
+static bool anon_check_huge(void *addr, int nr_hpages)
+{
+ return check_huge_anon(addr, nr_hpages, hpage_pmd_size);
+}
+
+static void *file_setup_area(int nr_hpages)
+{
+ int fd;
+ void *p;
+ unsigned long size;
+
+ unlink(finfo.path); /* Cleanup from previous failed tests */
+ printf("Creating %s for collapse%s...", finfo.path,
+ finfo.type == VMA_SHMEM ? " (tmpfs)" : "");
+ fd = open(finfo.path, O_DSYNC | O_CREAT | O_RDWR | O_TRUNC | O_EXCL,
+ 777);
+ if (fd < 0) {
+ perror("open()");
+ exit(EXIT_FAILURE);
+ }
+
+ size = nr_hpages * hpage_pmd_size;
+ p = alloc_mapping(nr_hpages);
+ fill_memory(p, 0, size);
+ write(fd, p, size);
+ close(fd);
+ munmap(p, size);
+ success("OK");
+
+ printf("Opening %s read only for collapse...", finfo.path);
+ finfo.fd = open(finfo.path, O_RDONLY, 777);
+ if (finfo.fd < 0) {
+ perror("open()");
+ exit(EXIT_FAILURE);
+ }
+ p = mmap(BASE_ADDR, size, PROT_READ | PROT_EXEC,
+ MAP_PRIVATE, finfo.fd, 0);
+ if (p == MAP_FAILED || p != BASE_ADDR) {
+ perror("mmap()");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Drop page cache */
+ write_file("/proc/sys/vm/drop_caches", "3", 2);
+ success("OK");
+ return p;
+}
+
+static void file_cleanup_area(void *p, unsigned long size)
+{
+ munmap(p, size);
+ close(finfo.fd);
+ unlink(finfo.path);
+}
+
+static void file_fault(void *p, unsigned long start, unsigned long end)
+{
+ if (madvise(((char *)p) + start, end - start, MADV_POPULATE_READ)) {
+		perror("madvise(MADV_POPULATE_READ)");
+ exit(EXIT_FAILURE);
+ }
+}
+
+static bool file_check_huge(void *addr, int nr_hpages)
+{
+ switch (finfo.type) {
+ case VMA_FILE:
+ return check_huge_file(addr, nr_hpages, hpage_pmd_size);
+ case VMA_SHMEM:
+ return check_huge_shmem(addr, nr_hpages, hpage_pmd_size);
+ default:
+ exit(EXIT_FAILURE);
+ return false;
+ }
+}
+
+static void *shmem_setup_area(int nr_hpages)
+{
+ void *p;
+ unsigned long size = nr_hpages * hpage_pmd_size;
+
+ finfo.fd = memfd_create("khugepaged-selftest-collapse-shmem", 0);
+ if (finfo.fd < 0) {
+ perror("memfd_create()");
+ exit(EXIT_FAILURE);
+ }
+ if (ftruncate(finfo.fd, size)) {
+ perror("ftruncate()");
+ exit(EXIT_FAILURE);
+ }
+ p = mmap(BASE_ADDR, size, PROT_READ | PROT_WRITE, MAP_SHARED, finfo.fd,
+ 0);
+ if (p != BASE_ADDR) {
+ perror("mmap()");
+ exit(EXIT_FAILURE);
+ }
+ return p;
+}
+
+static void shmem_cleanup_area(void *p, unsigned long size)
+{
+ munmap(p, size);
+ close(finfo.fd);
+}
+
+static bool shmem_check_huge(void *addr, int nr_hpages)
+{
+ return check_huge_shmem(addr, nr_hpages, hpage_pmd_size);
+}
+
+static struct mem_ops __anon_ops = {
+ .setup_area = &anon_setup_area,
+ .cleanup_area = &anon_cleanup_area,
+ .fault = &anon_fault,
+ .check_huge = &anon_check_huge,
+ .name = "anon",
+};
+
+static struct mem_ops __file_ops = {
+ .setup_area = &file_setup_area,
+ .cleanup_area = &file_cleanup_area,
+ .fault = &file_fault,
+ .check_huge = &file_check_huge,
+ .name = "file",
+};
+
+static struct mem_ops __shmem_ops = {
+ .setup_area = &shmem_setup_area,
+ .cleanup_area = &shmem_cleanup_area,
+ .fault = &anon_fault,
+ .check_huge = &shmem_check_huge,
+ .name = "shmem",
+};
+
+static void __madvise_collapse(const char *msg, char *p, int nr_hpages,
+ struct mem_ops *ops, bool expect)
+{
+ int ret;
+ struct settings settings = *current_settings();
+
+ printf("%s...", msg);
+
+ /*
+	 * Prevent khugepaged interference and test that MADV_COLLAPSE
+ * ignores /sys/kernel/mm/transparent_hugepage/enabled
+ */
+ settings.thp_enabled = THP_NEVER;
+ settings.shmem_enabled = SHMEM_NEVER;
+ push_settings(&settings);
+
+ /* Clear VM_NOHUGEPAGE */
+ madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE);
+ ret = madvise_collapse_retry(p, nr_hpages * hpage_pmd_size);
+ if (((bool)ret) == expect)
+ fail("Fail: Bad return value");
+ else if (!ops->check_huge(p, expect ? nr_hpages : 0))
+ fail("Fail: check_huge()");
+ else
+ success("OK");
+
+ pop_settings();
+}
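A brief restatement of the pass/fail logic above as a worked truth table, derived from the code and added only for clarity:

	/*
	 * madvise_collapse_retry() returns 0 on success, so:
	 *
	 *	expect == true,  ret != 0  -> fail("Bad return value")
	 *	expect == false, ret == 0  -> fail("Bad return value")
	 *
	 * and only when the return value matches the expectation is the mapping
	 * itself verified with ops->check_huge().
	 */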
+
+static void madvise_collapse(const char *msg, char *p, int nr_hpages,
+ struct mem_ops *ops, bool expect)
+{
+ /* Sanity check */
+ if (!ops->check_huge(p, 0)) {
+ printf("Unexpected huge page\n");
+ exit(EXIT_FAILURE);
+ }
+ __madvise_collapse(msg, p, nr_hpages, ops, expect);
+}
+
#define TICK 500000
-static bool wait_for_scan(const char *msg, char *p)
+static bool wait_for_scan(const char *msg, char *p, int nr_hpages,
+ struct mem_ops *ops)
{
int full_scans;
int timeout = 6; /* 3 seconds */
/* Sanity check */
- if (check_huge(p)) {
+ if (!ops->check_huge(p, 0)) {
printf("Unexpected huge page\n");
exit(EXIT_FAILURE);
}
- madvise(p, hpage_pmd_size, MADV_HUGEPAGE);
+ madvise(p, nr_hpages * hpage_pmd_size, MADV_HUGEPAGE);
/* Wait until the second full_scan completed */
full_scans = read_num("khugepaged/full_scans") + 2;
printf("%s...", msg);
while (timeout--) {
- if (check_huge(p))
+ if (ops->check_huge(p, nr_hpages))
break;
if (read_num("khugepaged/full_scans") >= full_scans)
break;
@@ -496,122 +866,155 @@ static bool wait_for_scan(const char *msg, char *p)
usleep(TICK);
}
- madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
+ madvise(p, nr_hpages * hpage_pmd_size, MADV_NOHUGEPAGE);
return timeout == -1;
}
+static void khugepaged_collapse(const char *msg, char *p, int nr_hpages,
+ struct mem_ops *ops, bool expect)
+{
+ if (wait_for_scan(msg, p, nr_hpages, ops)) {
+ if (expect)
+ fail("Timeout");
+ else
+ success("OK");
+ return;
+ }
+
+ /*
+ * For file and shmem memory, khugepaged only retracts pte entries after
+ * putting the new hugepage in the page cache. The hugepage must be
+ * subsequently refaulted to install the pmd mapping for the mm.
+ */
+ if (ops != &__anon_ops)
+ ops->fault(p, 0, nr_hpages * hpage_pmd_size);
+
+ if (ops->check_huge(p, expect ? nr_hpages : 0))
+ success("OK");
+ else
+ fail("Fail");
+}
+
+static struct collapse_context __khugepaged_context = {
+ .collapse = &khugepaged_collapse,
+ .enforce_pte_scan_limits = true,
+ .name = "khugepaged",
+};
+
+static struct collapse_context __madvise_context = {
+ .collapse = &madvise_collapse,
+ .enforce_pte_scan_limits = false,
+ .name = "madvise",
+};
+
+static bool is_tmpfs(struct mem_ops *ops)
+{
+ return ops == &__file_ops && finfo.type == VMA_SHMEM;
+}
+
static void alloc_at_fault(void)
{
- struct settings settings = default_settings;
+ struct settings settings = *current_settings();
char *p;
settings.thp_enabled = THP_ALWAYS;
- write_settings(&settings);
+ push_settings(&settings);
- p = alloc_mapping();
+ p = alloc_mapping(1);
*p = 1;
printf("Allocate huge page on fault...");
- if (check_huge(p))
+ if (check_huge_anon(p, 1, hpage_pmd_size))
success("OK");
else
fail("Fail");
- write_settings(&default_settings);
+ pop_settings();
madvise(p, page_size, MADV_DONTNEED);
printf("Split huge PMD on MADV_DONTNEED...");
- if (!check_huge(p))
+ if (check_huge_anon(p, 0, hpage_pmd_size))
success("OK");
else
fail("Fail");
munmap(p, hpage_pmd_size);
}
-static void collapse_full(void)
+static void collapse_full(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
-
- p = alloc_mapping();
- fill_memory(p, 0, hpage_pmd_size);
- if (wait_for_scan("Collapse fully populated PTE table", p))
- fail("Timeout");
- else if (check_huge(p))
- success("OK");
- else
- fail("Fail");
- validate_memory(p, 0, hpage_pmd_size);
- munmap(p, hpage_pmd_size);
+ int nr_hpages = 4;
+ unsigned long size = nr_hpages * hpage_pmd_size;
+
+ p = ops->setup_area(nr_hpages);
+ ops->fault(p, 0, size);
+	c->collapse("Collapse multiple fully populated PTE tables", p, nr_hpages,
+ ops, true);
+ validate_memory(p, 0, size);
+ ops->cleanup_area(p, size);
}
-static void collapse_empty(void)
+static void collapse_empty(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
- p = alloc_mapping();
- if (wait_for_scan("Do not collapse empty PTE table", p))
- fail("Timeout");
- else if (check_huge(p))
- fail("Fail");
- else
- success("OK");
- munmap(p, hpage_pmd_size);
+ p = ops->setup_area(1);
+ c->collapse("Do not collapse empty PTE table", p, 1, ops, false);
+ ops->cleanup_area(p, hpage_pmd_size);
}
-static void collapse_single_pte_entry(void)
+static void collapse_single_pte_entry(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
- p = alloc_mapping();
- fill_memory(p, 0, page_size);
- if (wait_for_scan("Collapse PTE table with single PTE entry present", p))
- fail("Timeout");
- else if (check_huge(p))
- success("OK");
- else
- fail("Fail");
- validate_memory(p, 0, page_size);
- munmap(p, hpage_pmd_size);
+ p = ops->setup_area(1);
+ ops->fault(p, 0, page_size);
+ c->collapse("Collapse PTE table with single PTE entry present", p,
+ 1, ops, true);
+ ops->cleanup_area(p, hpage_pmd_size);
}
-static void collapse_max_ptes_none(void)
+static void collapse_max_ptes_none(struct collapse_context *c, struct mem_ops *ops)
{
int max_ptes_none = hpage_pmd_nr / 2;
- struct settings settings = default_settings;
+ struct settings settings = *current_settings();
void *p;
settings.khugepaged.max_ptes_none = max_ptes_none;
- write_settings(&settings);
+ push_settings(&settings);
- p = alloc_mapping();
+ p = ops->setup_area(1);
- fill_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size);
- if (wait_for_scan("Do not collapse with max_ptes_none exceeded", p))
- fail("Timeout");
- else if (check_huge(p))
- fail("Fail");
- else
- success("OK");
- validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size);
+ if (is_tmpfs(ops)) {
+		/* shmem pages are always in the page cache */
+ printf("tmpfs...");
+ skip("Skip");
+ goto skip;
+ }
- fill_memory(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size);
- if (wait_for_scan("Collapse with max_ptes_none PTEs empty", p))
- fail("Timeout");
- else if (check_huge(p))
- success("OK");
- else
- fail("Fail");
- validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size);
+ ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size);
+ c->collapse("Maybe collapse with max_ptes_none exceeded", p, 1,
+ ops, !c->enforce_pte_scan_limits);
+ validate_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size);
- munmap(p, hpage_pmd_size);
- write_settings(&default_settings);
+ if (c->enforce_pte_scan_limits) {
+ ops->fault(p, 0, (hpage_pmd_nr - max_ptes_none) * page_size);
+ c->collapse("Collapse with max_ptes_none PTEs empty", p, 1, ops,
+ true);
+ validate_memory(p, 0,
+ (hpage_pmd_nr - max_ptes_none) * page_size);
+ }
+skip:
+ ops->cleanup_area(p, hpage_pmd_size);
+ pop_settings();
}
-static void collapse_swapin_single_pte(void)
+static void collapse_swapin_single_pte(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
- p = alloc_mapping();
- fill_memory(p, 0, hpage_pmd_size);
+
+ p = ops->setup_area(1);
+ ops->fault(p, 0, hpage_pmd_size);
printf("Swapout one page...");
if (madvise(p, page_size, MADV_PAGEOUT)) {
@@ -625,25 +1028,21 @@ static void collapse_swapin_single_pte(void)
goto out;
}
- if (wait_for_scan("Collapse with swapping in single PTE entry", p))
- fail("Timeout");
- else if (check_huge(p))
- success("OK");
- else
- fail("Fail");
+ c->collapse("Collapse with swapping in single PTE entry", p, 1, ops,
+ true);
validate_memory(p, 0, hpage_pmd_size);
out:
- munmap(p, hpage_pmd_size);
+ ops->cleanup_area(p, hpage_pmd_size);
}
-static void collapse_max_ptes_swap(void)
+static void collapse_max_ptes_swap(struct collapse_context *c, struct mem_ops *ops)
{
int max_ptes_swap = read_num("khugepaged/max_ptes_swap");
void *p;
- p = alloc_mapping();
+ p = ops->setup_area(1);
+ ops->fault(p, 0, hpage_pmd_size);
- fill_memory(p, 0, hpage_pmd_size);
printf("Swapout %d of %d pages...", max_ptes_swap + 1, hpage_pmd_nr);
if (madvise(p, (max_ptes_swap + 1) * page_size, MADV_PAGEOUT)) {
perror("madvise(MADV_PAGEOUT)");
@@ -656,115 +1055,93 @@ static void collapse_max_ptes_swap(void)
goto out;
}
- if (wait_for_scan("Do not collapse with max_ptes_swap exceeded", p))
- fail("Timeout");
- else if (check_huge(p))
- fail("Fail");
- else
- success("OK");
+ c->collapse("Maybe collapse with max_ptes_swap exceeded", p, 1, ops,
+ !c->enforce_pte_scan_limits);
validate_memory(p, 0, hpage_pmd_size);
- fill_memory(p, 0, hpage_pmd_size);
- printf("Swapout %d of %d pages...", max_ptes_swap, hpage_pmd_nr);
- if (madvise(p, max_ptes_swap * page_size, MADV_PAGEOUT)) {
- perror("madvise(MADV_PAGEOUT)");
- exit(EXIT_FAILURE);
- }
- if (check_swap(p, max_ptes_swap * page_size)) {
- success("OK");
- } else {
- fail("Fail");
- goto out;
- }
+ if (c->enforce_pte_scan_limits) {
+ ops->fault(p, 0, hpage_pmd_size);
+ printf("Swapout %d of %d pages...", max_ptes_swap,
+ hpage_pmd_nr);
+ if (madvise(p, max_ptes_swap * page_size, MADV_PAGEOUT)) {
+ perror("madvise(MADV_PAGEOUT)");
+ exit(EXIT_FAILURE);
+ }
+ if (check_swap(p, max_ptes_swap * page_size)) {
+ success("OK");
+ } else {
+ fail("Fail");
+ goto out;
+ }
- if (wait_for_scan("Collapse with max_ptes_swap pages swapped out", p))
- fail("Timeout");
- else if (check_huge(p))
- success("OK");
- else
- fail("Fail");
- validate_memory(p, 0, hpage_pmd_size);
+ c->collapse("Collapse with max_ptes_swap pages swapped out", p,
+ 1, ops, true);
+ validate_memory(p, 0, hpage_pmd_size);
+ }
out:
- munmap(p, hpage_pmd_size);
+ ops->cleanup_area(p, hpage_pmd_size);
}
-static void collapse_single_pte_entry_compound(void)
+static void collapse_single_pte_entry_compound(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
- p = alloc_mapping();
+ p = alloc_hpage(ops);
- printf("Allocate huge page...");
- madvise(p, hpage_pmd_size, MADV_HUGEPAGE);
- fill_memory(p, 0, hpage_pmd_size);
- if (check_huge(p))
- success("OK");
- else
- fail("Fail");
- madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
+ if (is_tmpfs(ops)) {
+ /* MADV_DONTNEED won't evict tmpfs pages */
+ printf("tmpfs...");
+ skip("Skip");
+ goto skip;
+ }
+ madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
printf("Split huge page leaving single PTE mapping compound page...");
madvise(p + page_size, hpage_pmd_size - page_size, MADV_DONTNEED);
- if (!check_huge(p))
+ if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
- if (wait_for_scan("Collapse PTE table with single PTE mapping compound page", p))
- fail("Timeout");
- else if (check_huge(p))
- success("OK");
- else
- fail("Fail");
+ c->collapse("Collapse PTE table with single PTE mapping compound page",
+ p, 1, ops, true);
validate_memory(p, 0, page_size);
- munmap(p, hpage_pmd_size);
+skip:
+ ops->cleanup_area(p, hpage_pmd_size);
}
-static void collapse_full_of_compound(void)
+static void collapse_full_of_compound(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
- p = alloc_mapping();
-
- printf("Allocate huge page...");
- madvise(p, hpage_pmd_size, MADV_HUGEPAGE);
- fill_memory(p, 0, hpage_pmd_size);
- if (check_huge(p))
- success("OK");
- else
- fail("Fail");
-
+ p = alloc_hpage(ops);
printf("Split huge page leaving single PTE page table full of compound pages...");
madvise(p, page_size, MADV_NOHUGEPAGE);
madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
- if (!check_huge(p))
+ if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
- if (wait_for_scan("Collapse PTE table full of compound pages", p))
- fail("Timeout");
- else if (check_huge(p))
- success("OK");
- else
- fail("Fail");
+ c->collapse("Collapse PTE table full of compound pages", p, 1, ops,
+ true);
validate_memory(p, 0, hpage_pmd_size);
- munmap(p, hpage_pmd_size);
+ ops->cleanup_area(p, hpage_pmd_size);
}
-static void collapse_compound_extreme(void)
+static void collapse_compound_extreme(struct collapse_context *c, struct mem_ops *ops)
{
void *p;
int i;
- p = alloc_mapping();
+ p = ops->setup_area(1);
for (i = 0; i < hpage_pmd_nr; i++) {
printf("\rConstruct PTE page table full of different PTE-mapped compound pages %3d/%d...",
i + 1, hpage_pmd_nr);
madvise(BASE_ADDR, hpage_pmd_size, MADV_HUGEPAGE);
- fill_memory(BASE_ADDR, 0, hpage_pmd_size);
- if (!check_huge(BASE_ADDR)) {
+ ops->fault(BASE_ADDR, 0, hpage_pmd_size);
+ if (!ops->check_huge(BASE_ADDR, 1)) {
printf("Failed to allocate huge page\n");
exit(EXIT_FAILURE);
}
@@ -791,34 +1168,30 @@ static void collapse_compound_extreme(void)
}
}
- munmap(BASE_ADDR, hpage_pmd_size);
- fill_memory(p, 0, hpage_pmd_size);
- if (!check_huge(p))
+ ops->cleanup_area(BASE_ADDR, hpage_pmd_size);
+ ops->fault(p, 0, hpage_pmd_size);
+ if (!ops->check_huge(p, 1))
success("OK");
else
fail("Fail");
- if (wait_for_scan("Collapse PTE table full of different compound pages", p))
- fail("Timeout");
- else if (check_huge(p))
- success("OK");
- else
- fail("Fail");
+ c->collapse("Collapse PTE table full of different compound pages", p, 1,
+ ops, true);
validate_memory(p, 0, hpage_pmd_size);
- munmap(p, hpage_pmd_size);
+ ops->cleanup_area(p, hpage_pmd_size);
}
-static void collapse_fork(void)
+static void collapse_fork(struct collapse_context *c, struct mem_ops *ops)
{
int wstatus;
void *p;
- p = alloc_mapping();
+ p = ops->setup_area(1);
printf("Allocate small page...");
- fill_memory(p, 0, page_size);
- if (!check_huge(p))
+ ops->fault(p, 0, page_size);
+ if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
@@ -829,22 +1202,17 @@ static void collapse_fork(void)
skip_settings_restore = true;
exit_status = 0;
- if (!check_huge(p))
+ if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
- fill_memory(p, page_size, 2 * page_size);
-
- if (wait_for_scan("Collapse PTE table with single page shared with parent process", p))
- fail("Timeout");
- else if (check_huge(p))
- success("OK");
- else
- fail("Fail");
+ ops->fault(p, page_size, 2 * page_size);
+ c->collapse("Collapse PTE table with single page shared with parent process",
+ p, 1, ops, true);
validate_memory(p, 0, page_size);
- munmap(p, hpage_pmd_size);
+ ops->cleanup_area(p, hpage_pmd_size);
exit(exit_status);
}
@@ -852,36 +1220,27 @@ static void collapse_fork(void)
exit_status += WEXITSTATUS(wstatus);
printf("Check if parent still has small page...");
- if (!check_huge(p))
+ if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
validate_memory(p, 0, page_size);
- munmap(p, hpage_pmd_size);
+ ops->cleanup_area(p, hpage_pmd_size);
}
-static void collapse_fork_compound(void)
+static void collapse_fork_compound(struct collapse_context *c, struct mem_ops *ops)
{
int wstatus;
void *p;
- p = alloc_mapping();
-
- printf("Allocate huge page...");
- madvise(p, hpage_pmd_size, MADV_HUGEPAGE);
- fill_memory(p, 0, hpage_pmd_size);
- if (check_huge(p))
- success("OK");
- else
- fail("Fail");
-
+ p = alloc_hpage(ops);
printf("Share huge page over fork()...");
if (!fork()) {
/* Do not touch settings on child exit */
skip_settings_restore = true;
exit_status = 0;
- if (check_huge(p))
+ if (ops->check_huge(p, 1))
success("OK");
else
fail("Fail");
@@ -889,24 +1248,20 @@ static void collapse_fork_compound(void)
printf("Split huge page PMD in child process...");
madvise(p, page_size, MADV_NOHUGEPAGE);
madvise(p, hpage_pmd_size, MADV_NOHUGEPAGE);
- if (!check_huge(p))
+ if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
- fill_memory(p, 0, page_size);
+ ops->fault(p, 0, page_size);
write_num("khugepaged/max_ptes_shared", hpage_pmd_nr - 1);
- if (wait_for_scan("Collapse PTE table full of compound pages in child", p))
- fail("Timeout");
- else if (check_huge(p))
- success("OK");
- else
- fail("Fail");
+ c->collapse("Collapse PTE table full of compound pages in child",
+ p, 1, ops, true);
write_num("khugepaged/max_ptes_shared",
- default_settings.khugepaged.max_ptes_shared);
+ current_settings()->khugepaged.max_ptes_shared);
validate_memory(p, 0, hpage_pmd_size);
- munmap(p, hpage_pmd_size);
+ ops->cleanup_area(p, hpage_pmd_size);
exit(exit_status);
}
@@ -914,74 +1269,59 @@ static void collapse_fork_compound(void)
exit_status += WEXITSTATUS(wstatus);
printf("Check if parent still has huge page...");
- if (check_huge(p))
+ if (ops->check_huge(p, 1))
success("OK");
else
fail("Fail");
validate_memory(p, 0, hpage_pmd_size);
- munmap(p, hpage_pmd_size);
+ ops->cleanup_area(p, hpage_pmd_size);
}
-static void collapse_max_ptes_shared()
+static void collapse_max_ptes_shared(struct collapse_context *c, struct mem_ops *ops)
{
int max_ptes_shared = read_num("khugepaged/max_ptes_shared");
int wstatus;
void *p;
- p = alloc_mapping();
-
- printf("Allocate huge page...");
- madvise(p, hpage_pmd_size, MADV_HUGEPAGE);
- fill_memory(p, 0, hpage_pmd_size);
- if (check_huge(p))
- success("OK");
- else
- fail("Fail");
-
+ p = alloc_hpage(ops);
printf("Share huge page over fork()...");
if (!fork()) {
/* Do not touch settings on child exit */
skip_settings_restore = true;
exit_status = 0;
- if (check_huge(p))
+ if (ops->check_huge(p, 1))
success("OK");
else
fail("Fail");
printf("Trigger CoW on page %d of %d...",
hpage_pmd_nr - max_ptes_shared - 1, hpage_pmd_nr);
- fill_memory(p, 0, (hpage_pmd_nr - max_ptes_shared - 1) * page_size);
- if (!check_huge(p))
- success("OK");
- else
- fail("Fail");
-
- if (wait_for_scan("Do not collapse with max_ptes_shared exceeded", p))
- fail("Timeout");
- else if (!check_huge(p))
- success("OK");
- else
- fail("Fail");
-
- printf("Trigger CoW on page %d of %d...",
- hpage_pmd_nr - max_ptes_shared, hpage_pmd_nr);
- fill_memory(p, 0, (hpage_pmd_nr - max_ptes_shared) * page_size);
- if (!check_huge(p))
+ ops->fault(p, 0, (hpage_pmd_nr - max_ptes_shared - 1) * page_size);
+ if (ops->check_huge(p, 0))
success("OK");
else
fail("Fail");
-
- if (wait_for_scan("Collapse with max_ptes_shared PTEs shared", p))
- fail("Timeout");
- else if (check_huge(p))
- success("OK");
- else
- fail("Fail");
+ c->collapse("Maybe collapse with max_ptes_shared exceeded", p,
+ 1, ops, !c->enforce_pte_scan_limits);
+
+ if (c->enforce_pte_scan_limits) {
+ printf("Trigger CoW on page %d of %d...",
+ hpage_pmd_nr - max_ptes_shared, hpage_pmd_nr);
+ ops->fault(p, 0, (hpage_pmd_nr - max_ptes_shared) *
+ page_size);
+ if (ops->check_huge(p, 0))
+ success("OK");
+ else
+ fail("Fail");
+
+ c->collapse("Collapse with max_ptes_shared PTEs shared",
+ p, 1, ops, true);
+ }
validate_memory(p, 0, hpage_pmd_size);
- munmap(p, hpage_pmd_size);
+ ops->cleanup_area(p, hpage_pmd_size);
exit(exit_status);
}
@@ -989,20 +1329,153 @@ static void collapse_max_ptes_shared()
exit_status += WEXITSTATUS(wstatus);
printf("Check if parent still has huge page...");
- if (check_huge(p))
+ if (ops->check_huge(p, 1))
success("OK");
else
fail("Fail");
validate_memory(p, 0, hpage_pmd_size);
- munmap(p, hpage_pmd_size);
+ ops->cleanup_area(p, hpage_pmd_size);
}
-int main(void)
+static void madvise_collapse_existing_thps(struct collapse_context *c,
+ struct mem_ops *ops)
{
+ void *p;
+
+ p = ops->setup_area(1);
+ ops->fault(p, 0, hpage_pmd_size);
+ c->collapse("Collapse fully populated PTE table...", p, 1, ops, true);
+ validate_memory(p, 0, hpage_pmd_size);
+
+ /* c->collapse() will find a hugepage and complain - call directly. */
+ __madvise_collapse("Re-collapse PMD-mapped hugepage", p, 1, ops, true);
+ validate_memory(p, 0, hpage_pmd_size);
+ ops->cleanup_area(p, hpage_pmd_size);
+}
+
+/*
+ * Test race with khugepaged where page tables have been retracted and
+ * pmd cleared.
+ */
+static void madvise_retracted_page_tables(struct collapse_context *c,
+ struct mem_ops *ops)
+{
+ void *p;
+ int nr_hpages = 1;
+ unsigned long size = nr_hpages * hpage_pmd_size;
+
+ p = ops->setup_area(nr_hpages);
+ ops->fault(p, 0, size);
+
+ /* Let khugepaged collapse and leave pmd cleared */
+ if (wait_for_scan("Collapse and leave PMD cleared", p, nr_hpages,
+ ops)) {
+ fail("Timeout");
+ return;
+ }
+ success("OK");
+ c->collapse("Install huge PMD from page cache", p, nr_hpages, ops,
+ true);
+ validate_memory(p, 0, size);
+ ops->cleanup_area(p, size);
+}
+
+static void usage(void)
+{
+ fprintf(stderr, "\nUsage: ./khugepaged <test type> [dir]\n\n");
+ fprintf(stderr, "\t<test type>\t: <context>:<mem_type>\n");
+ fprintf(stderr, "\t<context>\t: [all|khugepaged|madvise]\n");
+ fprintf(stderr, "\t<mem_type>\t: [all|anon|file|shmem]\n");
+	fprintf(stderr, "\n\tThe \"file\" and \"all\" mem_types require a [dir] argument\n");
+	fprintf(stderr, "\n\tThe \"file\" and \"all\" mem_types require a kernel built with\n");
+	fprintf(stderr, "\tCONFIG_READ_ONLY_THP_FOR_FS=y\n");
+ fprintf(stderr, "\n\tif [dir] is a (sub)directory of a tmpfs mount, tmpfs must be\n");
+ fprintf(stderr, "\tmounted with huge=madvise option for khugepaged tests to work\n");
+ exit(1);
+}
+
+static void parse_test_type(int argc, const char **argv)
+{
+ char *buf;
+ const char *token;
+
+ if (argc == 1) {
+ /* Backwards compatibility */
+ khugepaged_context = &__khugepaged_context;
+ madvise_context = &__madvise_context;
+ anon_ops = &__anon_ops;
+ return;
+ }
+
+ buf = strdup(argv[1]);
+ token = strsep(&buf, ":");
+
+ if (!strcmp(token, "all")) {
+ khugepaged_context = &__khugepaged_context;
+ madvise_context = &__madvise_context;
+ } else if (!strcmp(token, "khugepaged")) {
+ khugepaged_context = &__khugepaged_context;
+ } else if (!strcmp(token, "madvise")) {
+ madvise_context = &__madvise_context;
+ } else {
+ usage();
+ }
+
+ if (!buf)
+ usage();
+
+ if (!strcmp(buf, "all")) {
+ file_ops = &__file_ops;
+ anon_ops = &__anon_ops;
+ shmem_ops = &__shmem_ops;
+ } else if (!strcmp(buf, "anon")) {
+ anon_ops = &__anon_ops;
+ } else if (!strcmp(buf, "file")) {
+ file_ops = &__file_ops;
+ } else if (!strcmp(buf, "shmem")) {
+ shmem_ops = &__shmem_ops;
+ } else {
+ usage();
+ }
+
+ if (!file_ops)
+ return;
+
+ if (argc != 3)
+ usage();
+}
+
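A few illustrative invocations accepted by the parser above; the mount point is an example path, not part of the patch:

	/*
	 *	./khugepaged				# backwards-compatible: khugepaged + madvise contexts, anon memory
	 *	./khugepaged khugepaged:anon		# khugepaged collapse of anonymous memory only
	 *	./khugepaged madvise:file /mnt/xfs	# MADV_COLLAPSE of file-backed memory under /mnt/xfs
	 */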
+int main(int argc, const char **argv)
+{
+ struct settings default_settings = {
+ .thp_enabled = THP_MADVISE,
+ .thp_defrag = THP_DEFRAG_ALWAYS,
+ .shmem_enabled = SHMEM_ADVISE,
+ .use_zero_page = 0,
+ .khugepaged = {
+ .defrag = 1,
+ .alloc_sleep_millisecs = 10,
+ .scan_sleep_millisecs = 10,
+ },
+ /*
+ * When testing file-backed memory, the collapse path
+ * looks at how many pages are found in the page cache, not
+ * what pages are mapped. Disable read ahead optimization so
+ * pages don't find their way into the page cache unless
+ * we mem_ops->fault() them in.
+ */
+ .read_ahead_kb = 0,
+ };
+
+ parse_test_type(argc, argv);
+
+ if (file_ops)
+ get_finfo(argv[2]);
+
setbuf(stdout, NULL);
page_size = getpagesize();
- hpage_pmd_size = read_num("hpage_pmd_size");
+ hpage_pmd_size = read_pmd_pagesize();
hpage_pmd_nr = hpage_pmd_size / page_size;
default_settings.khugepaged.max_ptes_none = hpage_pmd_nr - 1;
@@ -1011,21 +1484,75 @@ int main(void)
default_settings.khugepaged.pages_to_scan = hpage_pmd_nr * 8;
save_settings();
- adjust_settings();
+ push_settings(&default_settings);
alloc_at_fault();
- collapse_full();
- collapse_empty();
- collapse_single_pte_entry();
- collapse_max_ptes_none();
- collapse_swapin_single_pte();
- collapse_max_ptes_swap();
- collapse_single_pte_entry_compound();
- collapse_full_of_compound();
- collapse_compound_extreme();
- collapse_fork();
- collapse_fork_compound();
- collapse_max_ptes_shared();
+
+#define TEST(t, c, o) do { \
+ if (c && o) { \
+ printf("\nRun test: " #t " (%s:%s)\n", c->name, o->name); \
+ t(c, o); \
+ } \
+ } while (0)
+
+ TEST(collapse_full, khugepaged_context, anon_ops);
+ TEST(collapse_full, khugepaged_context, file_ops);
+ TEST(collapse_full, khugepaged_context, shmem_ops);
+ TEST(collapse_full, madvise_context, anon_ops);
+ TEST(collapse_full, madvise_context, file_ops);
+ TEST(collapse_full, madvise_context, shmem_ops);
+
+ TEST(collapse_empty, khugepaged_context, anon_ops);
+ TEST(collapse_empty, madvise_context, anon_ops);
+
+ TEST(collapse_single_pte_entry, khugepaged_context, anon_ops);
+ TEST(collapse_single_pte_entry, khugepaged_context, file_ops);
+ TEST(collapse_single_pte_entry, khugepaged_context, shmem_ops);
+ TEST(collapse_single_pte_entry, madvise_context, anon_ops);
+ TEST(collapse_single_pte_entry, madvise_context, file_ops);
+ TEST(collapse_single_pte_entry, madvise_context, shmem_ops);
+
+ TEST(collapse_max_ptes_none, khugepaged_context, anon_ops);
+ TEST(collapse_max_ptes_none, khugepaged_context, file_ops);
+ TEST(collapse_max_ptes_none, madvise_context, anon_ops);
+ TEST(collapse_max_ptes_none, madvise_context, file_ops);
+
+ TEST(collapse_single_pte_entry_compound, khugepaged_context, anon_ops);
+ TEST(collapse_single_pte_entry_compound, khugepaged_context, file_ops);
+ TEST(collapse_single_pte_entry_compound, madvise_context, anon_ops);
+ TEST(collapse_single_pte_entry_compound, madvise_context, file_ops);
+
+ TEST(collapse_full_of_compound, khugepaged_context, anon_ops);
+ TEST(collapse_full_of_compound, khugepaged_context, file_ops);
+ TEST(collapse_full_of_compound, khugepaged_context, shmem_ops);
+ TEST(collapse_full_of_compound, madvise_context, anon_ops);
+ TEST(collapse_full_of_compound, madvise_context, file_ops);
+ TEST(collapse_full_of_compound, madvise_context, shmem_ops);
+
+ TEST(collapse_compound_extreme, khugepaged_context, anon_ops);
+ TEST(collapse_compound_extreme, madvise_context, anon_ops);
+
+ TEST(collapse_swapin_single_pte, khugepaged_context, anon_ops);
+ TEST(collapse_swapin_single_pte, madvise_context, anon_ops);
+
+ TEST(collapse_max_ptes_swap, khugepaged_context, anon_ops);
+ TEST(collapse_max_ptes_swap, madvise_context, anon_ops);
+
+ TEST(collapse_fork, khugepaged_context, anon_ops);
+ TEST(collapse_fork, madvise_context, anon_ops);
+
+ TEST(collapse_fork_compound, khugepaged_context, anon_ops);
+ TEST(collapse_fork_compound, madvise_context, anon_ops);
+
+ TEST(collapse_max_ptes_shared, khugepaged_context, anon_ops);
+ TEST(collapse_max_ptes_shared, madvise_context, anon_ops);
+
+ TEST(madvise_collapse_existing_thps, madvise_context, anon_ops);
+ TEST(madvise_collapse_existing_thps, madvise_context, file_ops);
+ TEST(madvise_collapse_existing_thps, madvise_context, shmem_ops);
+
+ TEST(madvise_retracted_page_tables, madvise_context, file_ops);
+ TEST(madvise_retracted_page_tables, madvise_context, shmem_ops);
restore_settings(0);
}
diff --git a/tools/testing/selftests/vm/mremap_test.c b/tools/testing/selftests/vm/mremap_test.c
index db0270127aeb..9496346973d4 100644
--- a/tools/testing/selftests/vm/mremap_test.c
+++ b/tools/testing/selftests/vm/mremap_test.c
@@ -119,6 +119,50 @@ static unsigned long long get_mmap_min_addr(void)
}
/*
+ * This test validates that merge is called when expanding a mapping.
+ * A mapping containing three pages is created, the middle page is
+ * unmapped, and then the mapping containing the first page is expanded
+ * so that it fills the created hole. The two parts should merge,
+ * creating a single mapping with three pages.
+ */
+static void mremap_expand_merge(unsigned long page_size)
+{
+ char *test_name = "mremap expand merge";
+ FILE *fp;
+ char *line = NULL;
+ size_t len = 0;
+ bool success = false;
+ char *start = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ munmap(start + page_size, page_size);
+ mremap(start, page_size, 2 * page_size, 0);
+
+ fp = fopen("/proc/self/maps", "r");
+ if (fp == NULL) {
+ ksft_test_result_fail("%s\n", test_name);
+ return;
+ }
+
+ while (getline(&line, &len, fp) != -1) {
+ char *first = strtok(line, "- ");
+ void *first_val = (void *)strtol(first, NULL, 16);
+ char *second = strtok(NULL, "- ");
+ void *second_val = (void *) strtol(second, NULL, 16);
+
+ if (first_val == start && second_val == start + 3 * page_size) {
+ success = true;
+ break;
+ }
+ }
+ if (success)
+ ksft_test_result_pass("%s\n", test_name);
+ else
+ ksft_test_result_fail("%s\n", test_name);
+ fclose(fp);
+}
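For clarity, a sketch of the /proc/self/maps line the loop above accepts as success; addresses and page size are illustrative:

	/*
	 * With 4 KiB pages and start == 0x7f0000000000, a merged VMA shows up as
	 *
	 *	7f0000000000-7f0000003000 rw-p 00000000 00:00 0
	 *
	 * i.e. a single mapping spanning all three pages, which is exactly the
	 * first_val == start && second_val == start + 3 * page_size check.
	 */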
+
+/*
* Returns the start address of the mapping on success, else returns
* NULL on failure.
*/
@@ -336,6 +380,7 @@ int main(int argc, char **argv)
int i, run_perf_tests;
unsigned int threshold_mb = VALIDATION_DEFAULT_THRESHOLD;
unsigned int pattern_seed;
+ int num_expand_tests = 1;
struct test test_cases[MAX_TEST];
struct test perf_test_cases[MAX_PERF_TEST];
int page_size;
@@ -407,12 +452,14 @@ int main(int argc, char **argv)
(threshold_mb * _1MB >= _1GB);
ksft_set_plan(ARRAY_SIZE(test_cases) + (run_perf_tests ?
- ARRAY_SIZE(perf_test_cases) : 0));
+ ARRAY_SIZE(perf_test_cases) : 0) + num_expand_tests);
for (i = 0; i < ARRAY_SIZE(test_cases); i++)
run_mremap_test_case(test_cases[i], &failures, threshold_mb,
pattern_seed);
+ mremap_expand_merge(page_size);
+
if (run_perf_tests) {
ksft_print_msg("\n%s\n",
"mremap HAVE_MOVE_PMD/PUD optimization time comparison for 1GB region:");
diff --git a/tools/testing/selftests/vm/run_vmtests.sh b/tools/testing/selftests/vm/run_vmtests.sh
index de86983b8a0f..e780e76c26b8 100755
--- a/tools/testing/selftests/vm/run_vmtests.sh
+++ b/tools/testing/selftests/vm/run_vmtests.sh
@@ -120,11 +120,16 @@ run_test ./gup_test -a
# Dump pages 0, 19, and 4096, using pin_user_pages:
run_test ./gup_test -ct -F 0x1 0 19 0x1000
-run_test ./userfaultfd anon 20 16
-# Test requires source and destination huge pages. Size of source
-# (half_ufd_size_MB) is passed as argument to test.
-run_test ./userfaultfd hugetlb "$half_ufd_size_MB" 32
-run_test ./userfaultfd shmem 20 16
+uffd_mods=("" ":dev")
+for mod in "${uffd_mods[@]}"; do
+ run_test ./userfaultfd anon${mod} 20 16
+ # Hugetlb tests require source and destination huge pages. Pass in half
+ # the size ($half_ufd_size_MB), which is used for *each*.
+ run_test ./userfaultfd hugetlb${mod} "$half_ufd_size_MB" 32
+ run_test ./userfaultfd hugetlb_shared${mod} "$half_ufd_size_MB" 32 "$mnt"/uffd-test
+ rm -f "$mnt"/uffd-test
+ run_test ./userfaultfd shmem${mod} 20 16
+done
#cleanup
umount "$mnt"
diff --git a/tools/testing/selftests/vm/soft-dirty.c b/tools/testing/selftests/vm/soft-dirty.c
index e3a43f5d4fa2..21d8830c5f24 100644
--- a/tools/testing/selftests/vm/soft-dirty.c
+++ b/tools/testing/selftests/vm/soft-dirty.c
@@ -91,7 +91,7 @@ static void test_hugepage(int pagemap_fd, int pagesize)
for (i = 0; i < hpage_len; i++)
map[i] = (char)i;
- if (check_huge(map)) {
+ if (check_huge_anon(map, 1, hpage_len)) {
ksft_test_result_pass("Test %s huge page allocation\n", __func__);
clear_softdirty();
diff --git a/tools/testing/selftests/vm/split_huge_page_test.c b/tools/testing/selftests/vm/split_huge_page_test.c
index 6aa2b8253aed..76e1c36dd9e5 100644
--- a/tools/testing/selftests/vm/split_huge_page_test.c
+++ b/tools/testing/selftests/vm/split_huge_page_test.c
@@ -92,7 +92,6 @@ void split_pmd_thp(void)
{
char *one_page;
size_t len = 4 * pmd_pagesize;
- uint64_t thp_size;
size_t i;
one_page = memalign(pmd_pagesize, len);
@@ -107,8 +106,7 @@ void split_pmd_thp(void)
for (i = 0; i < len; i++)
one_page[i] = (char)i;
- thp_size = check_huge(one_page);
- if (!thp_size) {
+ if (!check_huge_anon(one_page, 1, pmd_pagesize)) {
printf("No THP is allocated\n");
exit(EXIT_FAILURE);
}
@@ -124,9 +122,8 @@ void split_pmd_thp(void)
}
- thp_size = check_huge(one_page);
- if (thp_size) {
- printf("Still %ld kB AnonHugePages not split\n", thp_size);
+ if (check_huge_anon(one_page, 0, pmd_pagesize)) {
+ printf("Still AnonHugePages not split\n");
exit(EXIT_FAILURE);
}
@@ -172,8 +169,7 @@ void split_pte_mapped_thp(void)
for (i = 0; i < len; i++)
one_page[i] = (char)i;
- thp_size = check_huge(one_page);
- if (!thp_size) {
+ if (!check_huge_anon(one_page, 1, pmd_pagesize)) {
printf("No THP is allocated\n");
exit(EXIT_FAILURE);
}
diff --git a/tools/testing/selftests/vm/test_hmm.sh b/tools/testing/selftests/vm/test_hmm.sh
index 539c9371e592..46e19b5d648d 100755
--- a/tools/testing/selftests/vm/test_hmm.sh
+++ b/tools/testing/selftests/vm/test_hmm.sh
@@ -52,21 +52,11 @@ load_driver()
usage
fi
fi
- if [ $? == 0 ]; then
- major=$(awk "\$2==\"HMM_DMIRROR\" {print \$1}" /proc/devices)
- mknod /dev/hmm_dmirror0 c $major 0
- mknod /dev/hmm_dmirror1 c $major 1
- if [ $# -eq 2 ]; then
- mknod /dev/hmm_dmirror2 c $major 2
- mknod /dev/hmm_dmirror3 c $major 3
- fi
- fi
}
unload_driver()
{
modprobe -r $DRIVER > /dev/null 2>&1
- rm -f /dev/hmm_dmirror?
}
run_smoke()
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 7c3f1b0ab468..74babdbc02e5 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -61,10 +61,11 @@
#include <sys/random.h>
#include "../kselftest.h"
+#include "vm_util.h"
#ifdef __NR_userfaultfd
-static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
+static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size, hpage_size;
#define BOUNCE_RANDOM (1<<0)
#define BOUNCE_RACINGFAULTS (1<<1)
@@ -77,6 +78,13 @@ static int bounces;
#define TEST_SHMEM 3
static int test_type;
+#define UFFD_FLAGS (O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY)
+
+#define BASE_PMD_ADDR ((void *)(1UL << 30))
+
+/* test using /dev/userfaultfd, instead of userfaultfd(2) */
+static bool test_dev_userfaultfd;
+
/* exercise the test_uffdio_*_eexist every ALARM_INTERVAL_SECS */
#define ALARM_INTERVAL_SECS 10
static volatile bool test_uffdio_copy_eexist = true;
@@ -92,9 +100,10 @@ static int huge_fd;
static unsigned long long *count_verify;
static int uffd = -1;
static int uffd_flags, finished, *pipefd;
-static char *area_src, *area_src_alias, *area_dst, *area_dst_alias;
+static char *area_src, *area_src_alias, *area_dst, *area_dst_alias, *area_remap;
static char *zeropage;
pthread_attr_t attr;
+static bool test_collapse;
/* Userfaultfd test statistics */
struct uffd_stats {
@@ -122,9 +131,13 @@ struct uffd_stats {
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+#define factor_of_2(x) ((x) ^ ((x) & ((x) - 1)))
+
const char *examples =
"# Run anonymous memory test on 100MiB region with 99999 bounces:\n"
"./userfaultfd anon 100 99999\n\n"
+ "# Run the same anonymous memory test, but using /dev/userfaultfd:\n"
+ "./userfaultfd anon:dev 100 99999\n\n"
"# Run share memory test on 1GiB region with 99 bounces:\n"
"./userfaultfd shmem 1000 99\n\n"
"# Run hugetlb memory test on 256MiB region with 50 bounces:\n"
@@ -141,6 +154,16 @@ static void usage(void)
"[hugetlbfs_file]\n\n");
fprintf(stderr, "Supported <test type>: anon, hugetlb, "
"hugetlb_shared, shmem\n\n");
+ fprintf(stderr, "'Test mods' can be joined to the test type string with a ':'. "
+ "Supported mods:\n");
+ fprintf(stderr, "\tsyscall - Use userfaultfd(2) (default)\n");
+ fprintf(stderr, "\tdev - Use /dev/userfaultfd instead of userfaultfd(2)\n");
+ fprintf(stderr, "\tcollapse - Test MADV_COLLAPSE of UFFDIO_REGISTER_MODE_MINOR\n"
+ "memory\n");
+ fprintf(stderr, "\nExample test mod usage:\n");
+ fprintf(stderr, "# Run anonymous memory test with /dev/userfaultfd:\n");
+ fprintf(stderr, "./userfaultfd anon:dev 100 99999\n\n");
+
fprintf(stderr, "Examples:\n\n");
fprintf(stderr, "%s", examples);
exit(1);
@@ -154,12 +177,14 @@ static void usage(void)
ret, __LINE__); \
} while (0)
-#define err(fmt, ...) \
+#define errexit(exitcode, fmt, ...) \
do { \
_err(fmt, ##__VA_ARGS__); \
- exit(1); \
+ exit(exitcode); \
} while (0)
+#define err(fmt, ...) errexit(1, fmt, ##__VA_ARGS__)
+
static void uffd_stats_reset(struct uffd_stats *uffd_stats,
unsigned long n_cpus)
{
@@ -212,12 +237,10 @@ static void anon_release_pages(char *rel_area)
err("madvise(MADV_DONTNEED) failed");
}
-static void anon_allocate_area(void **alloc_area)
+static void anon_allocate_area(void **alloc_area, bool is_src)
{
*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- if (*alloc_area == MAP_FAILED)
- err("mmap of anonymous memory failed");
}
static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
@@ -235,7 +258,7 @@ static void hugetlb_release_pages(char *rel_area)
}
}
-static void hugetlb_allocate_area(void **alloc_area)
+static void hugetlb_allocate_area(void **alloc_area, bool is_src)
{
void *area_alias = NULL;
char **alloc_area_alias;
@@ -245,7 +268,7 @@ static void hugetlb_allocate_area(void **alloc_area)
nr_pages * page_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
- (*alloc_area == area_src ? 0 : MAP_NORESERVE),
+ (is_src ? 0 : MAP_NORESERVE),
-1,
0);
else
@@ -253,9 +276,9 @@ static void hugetlb_allocate_area(void **alloc_area)
nr_pages * page_size,
PROT_READ | PROT_WRITE,
MAP_SHARED |
- (*alloc_area == area_src ? 0 : MAP_NORESERVE),
+ (is_src ? 0 : MAP_NORESERVE),
huge_fd,
- *alloc_area == area_src ? 0 : nr_pages * page_size);
+ is_src ? 0 : nr_pages * page_size);
if (*alloc_area == MAP_FAILED)
err("mmap of hugetlbfs file failed");
@@ -265,12 +288,12 @@ static void hugetlb_allocate_area(void **alloc_area)
PROT_READ | PROT_WRITE,
MAP_SHARED,
huge_fd,
- *alloc_area == area_src ? 0 : nr_pages * page_size);
+ is_src ? 0 : nr_pages * page_size);
if (area_alias == MAP_FAILED)
err("mmap of hugetlb file alias failed");
}
- if (*alloc_area == area_src) {
+ if (is_src) {
alloc_area_alias = &area_src_alias;
} else {
alloc_area_alias = &area_dst_alias;
@@ -293,21 +316,36 @@ static void shmem_release_pages(char *rel_area)
err("madvise(MADV_REMOVE) failed");
}
-static void shmem_allocate_area(void **alloc_area)
+static void shmem_allocate_area(void **alloc_area, bool is_src)
{
void *area_alias = NULL;
- bool is_src = alloc_area == (void **)&area_src;
- unsigned long offset = is_src ? 0 : nr_pages * page_size;
+ size_t bytes = nr_pages * page_size;
+ unsigned long offset = is_src ? 0 : bytes;
+ char *p = NULL, *p_alias = NULL;
+
+ if (test_collapse) {
+ p = BASE_PMD_ADDR;
+ if (!is_src)
+ /* src map + alias + interleaved hpages */
+ p += 2 * (bytes + hpage_size);
+ p_alias = p;
+ p_alias += bytes;
+ p_alias += hpage_size; /* Prevent src/dst VMA merge */
+ }
- *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
- MAP_SHARED, shm_fd, offset);
+ *alloc_area = mmap(p, bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
+ shm_fd, offset);
if (*alloc_area == MAP_FAILED)
err("mmap of memfd failed");
+ if (test_collapse && *alloc_area != p)
+ err("mmap of memfd failed at %p", p);
- area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
- MAP_SHARED, shm_fd, offset);
+ area_alias = mmap(p_alias, bytes, PROT_READ | PROT_WRITE, MAP_SHARED,
+ shm_fd, offset);
if (area_alias == MAP_FAILED)
err("mmap of memfd alias failed");
+ if (test_collapse && area_alias != p_alias)
+		err("mmap of memfd alias failed at %p", p_alias);
if (is_src)
area_src_alias = area_alias;
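A worked sketch of the address layout produced by the test_collapse branch above; the arithmetic follows the code, the absolute base is whatever BASE_PMD_ADDR resolves to:

	/*
	 * With bytes = nr_pages * page_size and one hpage_size gap between
	 * neighbouring regions to keep their VMAs from merging:
	 *
	 *	src area:   BASE_PMD_ADDR
	 *	src alias:  BASE_PMD_ADDR + bytes + hpage_size
	 *	dst area:   BASE_PMD_ADDR + 2 * (bytes + hpage_size)
	 *	dst alias:  dst area      + bytes + hpage_size
	 */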
@@ -320,28 +358,39 @@ static void shmem_alias_mapping(__u64 *start, size_t len, unsigned long offset)
*start = (unsigned long)area_dst_alias + offset;
}
+static void shmem_check_pmd_mapping(void *p, int expect_nr_hpages)
+{
+ if (!check_huge_shmem(area_dst_alias, expect_nr_hpages, hpage_size))
+ err("Did not find expected %d number of hugepages",
+ expect_nr_hpages);
+}
+
struct uffd_test_ops {
- void (*allocate_area)(void **alloc_area);
+ void (*allocate_area)(void **alloc_area, bool is_src);
void (*release_pages)(char *rel_area);
void (*alias_mapping)(__u64 *start, size_t len, unsigned long offset);
+ void (*check_pmd_mapping)(void *p, int expect_nr_hpages);
};
static struct uffd_test_ops anon_uffd_test_ops = {
.allocate_area = anon_allocate_area,
.release_pages = anon_release_pages,
.alias_mapping = noop_alias_mapping,
+ .check_pmd_mapping = NULL,
};
static struct uffd_test_ops shmem_uffd_test_ops = {
.allocate_area = shmem_allocate_area,
.release_pages = shmem_release_pages,
.alias_mapping = shmem_alias_mapping,
+ .check_pmd_mapping = shmem_check_pmd_mapping,
};
static struct uffd_test_ops hugetlb_uffd_test_ops = {
.allocate_area = hugetlb_allocate_area,
.release_pages = hugetlb_release_pages,
.alias_mapping = hugetlb_alias_mapping,
+ .check_pmd_mapping = NULL,
};
static struct uffd_test_ops *uffd_test_ops;
@@ -383,13 +432,34 @@ static void assert_expected_ioctls_present(uint64_t mode, uint64_t ioctls)
}
}
+static int __userfaultfd_open_dev(void)
+{
+ int fd, _uffd;
+
+ fd = open("/dev/userfaultfd", O_RDWR | O_CLOEXEC);
+ if (fd < 0)
+ errexit(KSFT_SKIP, "opening /dev/userfaultfd failed");
+
+ _uffd = ioctl(fd, USERFAULTFD_IOC_NEW, UFFD_FLAGS);
+ if (_uffd < 0)
+ errexit(errno == ENOTTY ? KSFT_SKIP : 1,
+ "creating userfaultfd failed");
+ close(fd);
+ return _uffd;
+}
+
static void userfaultfd_open(uint64_t *features)
{
struct uffdio_api uffdio_api;
- uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);
- if (uffd < 0)
- err("userfaultfd syscall not available in this kernel");
+ if (test_dev_userfaultfd)
+ uffd = __userfaultfd_open_dev();
+ else {
+ uffd = syscall(__NR_userfaultfd, UFFD_FLAGS);
+ if (uffd < 0)
+ errexit(errno == ENOSYS ? KSFT_SKIP : 1,
+ "creating userfaultfd failed");
+ }
uffd_flags = fcntl(uffd, F_GETFD, NULL);
uffdio_api.api = UFFD_API;
@@ -440,6 +510,7 @@ static void uffd_test_ctx_clear(void)
munmap_area((void **)&area_src_alias);
munmap_area((void **)&area_dst);
munmap_area((void **)&area_dst_alias);
+ munmap_area((void **)&area_remap);
}
static void uffd_test_ctx_init(uint64_t features)
@@ -448,8 +519,8 @@ static void uffd_test_ctx_init(uint64_t features)
uffd_test_ctx_clear();
- uffd_test_ops->allocate_area((void **)&area_src);
- uffd_test_ops->allocate_area((void **)&area_dst);
+ uffd_test_ops->allocate_area((void **)&area_src, true);
+ uffd_test_ops->allocate_area((void **)&area_dst, false);
userfaultfd_open(&features);
@@ -766,6 +837,7 @@ static void *uffd_poll_thread(void *arg)
err("remove failure");
break;
case UFFD_EVENT_REMAP:
+ area_remap = area_dst; /* save for later unmap */
area_dst = (char *)(unsigned long)msg.arg.remap.to;
break;
}
@@ -1218,13 +1290,30 @@ static int userfaultfd_sig_test(void)
return userfaults != 0;
}
+void check_memory_contents(char *p)
+{
+ unsigned long i;
+ uint8_t expected_byte;
+ void *expected_page;
+
+ if (posix_memalign(&expected_page, page_size, page_size))
+ err("out of memory");
+
+ for (i = 0; i < nr_pages; ++i) {
+ expected_byte = ~((uint8_t)(i % ((uint8_t)-1)));
+ memset(expected_page, expected_byte, page_size);
+ if (my_bcmp(expected_page, p + (i * page_size), page_size))
+ err("unexpected page contents after minor fault");
+ }
+
+ free(expected_page);
+}
+
static int userfaultfd_minor_test(void)
{
- struct uffdio_register uffdio_register;
unsigned long p;
+ struct uffdio_register uffdio_register;
pthread_t uffd_mon;
- uint8_t expected_byte;
- void *expected_page;
char c;
struct uffd_stats stats = { 0 };
@@ -1263,17 +1352,7 @@ static int userfaultfd_minor_test(void)
* fault. uffd_poll_thread will resolve the fault by bit-flipping the
* page's contents, and then issuing a CONTINUE ioctl.
*/
-
- if (posix_memalign(&expected_page, page_size, page_size))
- err("out of memory");
-
- for (p = 0; p < nr_pages; ++p) {
- expected_byte = ~((uint8_t)(p % ((uint8_t)-1)));
- memset(expected_page, expected_byte, page_size);
- if (my_bcmp(expected_page, area_dst_alias + (p * page_size),
- page_size))
- err("unexpected page contents after minor fault");
- }
+ check_memory_contents(area_dst_alias);
if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
err("pipe write");
@@ -1282,6 +1361,23 @@ static int userfaultfd_minor_test(void)
uffd_stats_report(&stats, 1);
+ if (test_collapse) {
+ printf("testing collapse of uffd memory into PMD-mapped THPs:");
+ if (madvise(area_dst_alias, nr_pages * page_size,
+ MADV_COLLAPSE))
+ err("madvise(MADV_COLLAPSE)");
+
+ uffd_test_ops->check_pmd_mapping(area_dst,
+ nr_pages * page_size /
+ hpage_size);
+ /*
+ * This won't cause a uffd fault - it just makes sure there
+ * was no corruption.
+ */
+ check_memory_contents(area_dst_alias);
+ printf(" done.\n");
+ }
+
return stats.missing_faults != 0 || stats.minor_faults != nr_pages;
}
@@ -1584,8 +1680,6 @@ unsigned long default_huge_page_size(void)
static void set_test_type(const char *type)
{
- uint64_t features = UFFD_API_FEATURES;
-
if (!strcmp(type, "anon")) {
test_type = TEST_ANON;
uffd_test_ops = &anon_uffd_test_ops;
@@ -1603,12 +1697,37 @@ static void set_test_type(const char *type)
test_type = TEST_SHMEM;
uffd_test_ops = &shmem_uffd_test_ops;
test_uffdio_minor = true;
- } else {
- err("Unknown test type: %s", type);
}
+}
+
+static void parse_test_type_arg(const char *raw_type)
+{
+ char *buf = strdup(raw_type);
+ uint64_t features = UFFD_API_FEATURES;
+
+ while (buf) {
+ const char *token = strsep(&buf, ":");
+
+ if (!test_type)
+ set_test_type(token);
+ else if (!strcmp(token, "dev"))
+ test_dev_userfaultfd = true;
+ else if (!strcmp(token, "syscall"))
+ test_dev_userfaultfd = false;
+ else if (!strcmp(token, "collapse"))
+ test_collapse = true;
+ else
+ err("unrecognized test mod '%s'", token);
+ }
+
+ if (!test_type)
+ err("failed to parse test type argument: '%s'", raw_type);
+
+ if (test_collapse && test_type != TEST_SHMEM)
+ err("Unsupported test: %s", raw_type);
if (test_type == TEST_HUGETLB)
- page_size = default_huge_page_size();
+ page_size = hpage_size;
else
page_size = sysconf(_SC_PAGE_SIZE);
@@ -1646,6 +1765,8 @@ static void sigalrm(int sig)
int main(int argc, char **argv)
{
+ size_t bytes;
+
if (argc < 4)
usage();
@@ -1653,11 +1774,41 @@ int main(int argc, char **argv)
err("failed to arm SIGALRM");
alarm(ALARM_INTERVAL_SECS);
- set_test_type(argv[1]);
+ hpage_size = default_huge_page_size();
+ parse_test_type_arg(argv[1]);
+ bytes = atol(argv[2]) * 1024 * 1024;
+
+ if (test_collapse && bytes & (hpage_size - 1))
+ err("MiB must be multiple of %lu if :collapse mod set",
+ hpage_size >> 20);
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
- nr_pages_per_cpu = atol(argv[2]) * 1024*1024 / page_size /
- nr_cpus;
+
+ if (test_collapse) {
+ /* nr_cpus must divide (bytes / page_size); otherwise,
+ * area allocations of (nr_pages * page_size) won't be a
+ * multiple of hpage_size, even if bytes is a multiple of
+ * hpage_size.
+ *
+ * This means that nr_cpus must divide (N * (1 << (H - P)))
+ * where:
+ * bytes = hpage_size * N
+ * hpage_size = 1 << H
+ * page_size = 1 << P
+ *
+ * And we want to choose nr_cpus to be the largest value
+ * satisfying this constraint, not larger than the number
+ * of online CPUs. Unfortunately, the prime factorizations of
+ * N and nr_cpus may be arbitrary, so we would have to search for it.
+ * Instead, just use the highest power of 2 dividing both
+ * nr_cpus and (bytes / page_size).
+ */
+ int x = factor_of_2(nr_cpus);
+ int y = factor_of_2(bytes / page_size);
+
+ nr_cpus = x < y ? x : y;
+ }
+ nr_pages_per_cpu = bytes / page_size / nr_cpus;
if (!nr_pages_per_cpu) {
_err("invalid MiB");
usage();
diff --git a/tools/testing/selftests/vm/vm_util.c b/tools/testing/selftests/vm/vm_util.c
index b58ab11a7a30..f11f8adda521 100644
--- a/tools/testing/selftests/vm/vm_util.c
+++ b/tools/testing/selftests/vm/vm_util.c
@@ -42,9 +42,9 @@ void clear_softdirty(void)
ksft_exit_fail_msg("writing clear_refs failed\n");
}
-static bool check_for_pattern(FILE *fp, const char *pattern, char *buf)
+bool check_for_pattern(FILE *fp, const char *pattern, char *buf, size_t len)
{
- while (fgets(buf, MAX_LINE_LENGTH, fp) != NULL) {
+ while (fgets(buf, len, fp)) {
if (!strncmp(buf, pattern, strlen(pattern)))
return true;
}
@@ -72,9 +72,10 @@ uint64_t read_pmd_pagesize(void)
return strtoul(buf, NULL, 10);
}
-uint64_t check_huge(void *addr)
+bool __check_huge(void *addr, char *pattern, int nr_hpages,
+ uint64_t hpage_size)
{
- uint64_t thp = 0;
+ uint64_t thp = -1;
int ret;
FILE *fp;
char buffer[MAX_LINE_LENGTH];
@@ -89,20 +90,37 @@ uint64_t check_huge(void *addr)
if (!fp)
ksft_exit_fail_msg("%s: Failed to open file %s\n", __func__, SMAP_FILE_PATH);
- if (!check_for_pattern(fp, addr_pattern, buffer))
+ if (!check_for_pattern(fp, addr_pattern, buffer, sizeof(buffer)))
goto err_out;
/*
- * Fetch the AnonHugePages: in the same block and check the number of
+ * Fetch the pattern in the same block and check the number of
* hugepages.
*/
- if (!check_for_pattern(fp, "AnonHugePages:", buffer))
+ if (!check_for_pattern(fp, pattern, buffer, sizeof(buffer)))
goto err_out;
- if (sscanf(buffer, "AnonHugePages:%10ld kB", &thp) != 1)
+ snprintf(addr_pattern, MAX_LINE_LENGTH, "%s%%9ld kB", pattern);
+
+ if (sscanf(buffer, addr_pattern, &thp) != 1)
ksft_exit_fail_msg("Reading smap error\n");
err_out:
fclose(fp);
- return thp;
+ return thp == (nr_hpages * (hpage_size >> 10));
+}
+
+bool check_huge_anon(void *addr, int nr_hpages, uint64_t hpage_size)
+{
+ return __check_huge(addr, "AnonHugePages: ", nr_hpages, hpage_size);
+}
+
+bool check_huge_file(void *addr, int nr_hpages, uint64_t hpage_size)
+{
+ return __check_huge(addr, "FilePmdMapped:", nr_hpages, hpage_size);
+}
+
+bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size)
+{
+ return __check_huge(addr, "ShmemPmdMapped:", nr_hpages, hpage_size);
}
diff --git a/tools/testing/selftests/vm/vm_util.h b/tools/testing/selftests/vm/vm_util.h
index 2e512bd57ae1..5c35de454e08 100644
--- a/tools/testing/selftests/vm/vm_util.h
+++ b/tools/testing/selftests/vm/vm_util.h
@@ -5,5 +5,8 @@
uint64_t pagemap_get_entry(int fd, char *start);
bool pagemap_is_softdirty(int fd, char *start);
void clear_softdirty(void);
+bool check_for_pattern(FILE *fp, const char *pattern, char *buf, size_t len);
uint64_t read_pmd_pagesize(void);
-uint64_t check_huge(void *addr);
+bool check_huge_anon(void *addr, int nr_hpages, uint64_t hpage_size);
+bool check_huge_file(void *addr, int nr_hpages, uint64_t hpage_size);
+bool check_huge_shmem(void *addr, int nr_hpages, uint64_t hpage_size);
diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
index ec2e67c85b84..ce860ab94162 100644
--- a/tools/vm/page_owner_sort.c
+++ b/tools/vm/page_owner_sort.c
@@ -470,7 +470,12 @@ static bool match_str_list(const char *str, char **list, int list_size)
static bool is_need(char *buf)
{
- if ((filter & FILTER_UNRELEASE) && get_free_ts_nsec(buf) != 0)
+ __u64 ts_nsec, free_ts_nsec;
+
+ ts_nsec = get_ts_nsec(buf);
+ free_ts_nsec = get_free_ts_nsec(buf);
+
+ if ((filter & FILTER_UNRELEASE) && free_ts_nsec != 0 && ts_nsec < free_ts_nsec)
return false;
if ((filter & FILTER_PID) && !match_num_list(get_pid(buf), fc.pids, fc.pids_size))
return false;