-rw-r--r--Documentation/admin-guide/kernel-parameters.txt9
-rw-r--r--Documentation/core-api/idr.rst79
-rw-r--r--Documentation/core-api/index.rst1
-rw-r--r--Documentation/core-api/kernel-api.rst12
-rw-r--r--Documentation/cpu-freq/cpu-drivers.txt4
-rw-r--r--Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt3
-rw-r--r--Documentation/driver-api/s390-drivers.rst32
-rw-r--r--Documentation/filesystems/afs.txt17
-rw-r--r--Documentation/virtual/kvm/00-INDEX3
-rw-r--r--Documentation/virtual/kvm/amd-memory-encryption.rst247
-rw-r--r--Documentation/virtual/kvm/api.txt54
-rw-r--r--Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt187
-rw-r--r--Documentation/virtual/kvm/cpuid.txt4
-rw-r--r--MAINTAINERS14
-rw-r--r--Makefile3
-rw-r--r--arch/arm/include/asm/kvm_emulate.h2
-rw-r--r--arch/arm/include/asm/kvm_host.h9
-rw-r--r--arch/arm/include/asm/kvm_hyp.h3
-rw-r--r--arch/arm/include/asm/kvm_mmu.h99
-rw-r--r--arch/arm/include/asm/kvm_psci.h27
-rw-r--r--arch/arm/include/asm/pgtable.h4
-rw-r--r--arch/arm/kvm/handle_exit.c17
-rw-r--r--arch/arm/kvm/hyp/switch.c1
-rw-r--r--arch/arm/kvm/hyp/tlb.c1
-rw-r--r--arch/arm/mach-vt8500/Kconfig1
-rw-r--r--arch/arm64/include/asm/assembler.h64
-rw-r--r--arch/arm64/include/asm/barrier.h22
-rw-r--r--arch/arm64/include/asm/cacheflush.h7
-rw-r--r--arch/arm64/include/asm/futex.h9
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h12
-rw-r--r--arch/arm64/include/asm/kvm_host.h8
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h1
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h36
-rw-r--r--arch/arm64/include/asm/kvm_psci.h27
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h2
-rw-r--r--arch/arm64/include/asm/pgtable-prot.h30
-rw-r--r--arch/arm64/include/asm/processor.h3
-rw-r--r--arch/arm64/include/asm/spinlock.h4
-rw-r--r--arch/arm64/include/asm/uaccess.h155
-rw-r--r--arch/arm64/kernel/acpi.c4
-rw-r--r--arch/arm64/kernel/arm64ksyms.c4
-rw-r--r--arch/arm64/kernel/bpi.S44
-rw-r--r--arch/arm64/kernel/cpu-reset.S2
-rw-r--r--arch/arm64/kernel/cpu_errata.c77
-rw-r--r--arch/arm64/kernel/cpufeature.c42
-rw-r--r--arch/arm64/kernel/entry.S29
-rw-r--r--arch/arm64/kernel/head.S30
-rw-r--r--arch/arm64/kernel/hibernate-asm.S4
-rw-r--r--arch/arm64/kernel/sleep.S2
-rw-r--r--arch/arm64/kvm/guest.c15
-rw-r--r--arch/arm64/kvm/handle_exit.c14
-rw-r--r--arch/arm64/kvm/hyp-init.S2
-rw-r--r--arch/arm64/kvm/hyp/debug-sr.c1
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S20
-rw-r--r--arch/arm64/kvm/hyp/switch.c15
-rw-r--r--arch/arm64/kvm/hyp/tlb.c1
-rw-r--r--arch/arm64/lib/clear_user.S6
-rw-r--r--arch/arm64/lib/copy_in_user.S5
-rw-r--r--arch/arm64/mm/cache.S32
-rw-r--r--arch/arm64/mm/fault.c19
-rw-r--r--arch/arm64/mm/mmu.c4
-rw-r--r--arch/arm64/mm/proc.S212
-rw-r--r--arch/cris/kernel/Makefile17
-rw-r--r--arch/cris/kernel/setup.c2
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/bcm63xx/boards/Kconfig1
-rw-r--r--arch/mips/kvm/Kconfig1
-rw-r--r--arch/mips/kvm/mips.c67
-rw-r--r--arch/nios2/Kconfig1
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h6
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h14
-rw-r--r--arch/powerpc/include/asm/kvm_host.h8
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h4
-rw-r--r--arch/powerpc/include/asm/opal-api.h1
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h6
-rw-r--r--arch/powerpc/include/asm/xive.h3
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h2
-rw-r--r--arch/powerpc/kernel/asm-offsets.c4
-rw-r--r--arch/powerpc/kvm/Kconfig3
-rw-r--r--arch/powerpc/kvm/book3s.c24
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c38
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv.c70
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S231
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S4
-rw-r--r--arch/powerpc/kvm/book3s_pr.c20
-rw-r--r--arch/powerpc/kvm/book3s_xive.c109
-rw-r--r--arch/powerpc/kvm/book3s_xive.h15
-rw-r--r--arch/powerpc/kvm/booke.c51
-rw-r--r--arch/powerpc/kvm/emulate_loadstore.c36
-rw-r--r--arch/powerpc/kvm/powerpc.c200
-rw-r--r--arch/powerpc/kvm/timing.c3
-rw-r--r--arch/powerpc/sysdev/xive/native.c18
-rw-r--r--arch/s390/Kconfig46
-rw-r--r--arch/s390/Makefile10
-rw-r--r--arch/s390/include/asm/barrier.h24
-rw-r--r--arch/s390/include/asm/bitops.h5
-rw-r--r--arch/s390/include/asm/css_chars.h4
-rw-r--r--arch/s390/include/asm/eadm.h2
-rw-r--r--arch/s390/include/asm/facility.h18
-rw-r--r--arch/s390/include/asm/kvm_host.h126
-rw-r--r--arch/s390/include/asm/lowcore.h9
-rw-r--r--arch/s390/include/asm/nospec-branch.h18
-rw-r--r--arch/s390/include/asm/processor.h4
-rw-r--r--arch/s390/include/asm/runtime_instr.h67
-rw-r--r--arch/s390/include/asm/sclp.h1
-rw-r--r--arch/s390/include/asm/sysinfo.h3
-rw-r--r--arch/s390/include/asm/thread_info.h4
-rw-r--r--arch/s390/include/uapi/asm/runtime_instr.h74
-rw-r--r--arch/s390/kernel/Makefile4
-rw-r--r--arch/s390/kernel/alternative.c26
-rw-r--r--arch/s390/kernel/early.c5
-rw-r--r--arch/s390/kernel/entry.S249
-rw-r--r--arch/s390/kernel/ipl.c1
-rw-r--r--arch/s390/kernel/kprobes.c2
-rw-r--r--arch/s390/kernel/module.c62
-rw-r--r--arch/s390/kernel/nospec-branch.c100
-rw-r--r--arch/s390/kernel/perf_cpum_cf_events.c2
-rw-r--r--arch/s390/kernel/processor.c18
-rw-r--r--arch/s390/kernel/runtime_instr.c10
-rw-r--r--arch/s390/kernel/setup.c8
-rw-r--r--arch/s390/kernel/smp.c7
-rw-r--r--arch/s390/kernel/sysinfo.c2
-rw-r--r--arch/s390/kernel/vmlinux.lds.S14
-rw-r--r--arch/s390/kvm/Kconfig1
-rw-r--r--arch/s390/kvm/diag.c1
-rw-r--r--arch/s390/kvm/interrupt.c288
-rw-r--r--arch/s390/kvm/kvm-s390.c209
-rw-r--r--arch/s390/kvm/kvm-s390.h22
-rw-r--r--arch/s390/kvm/priv.c38
-rw-r--r--arch/s390/kvm/sigp.c18
-rw-r--r--arch/s390/kvm/vsie.c91
-rw-r--r--arch/s390/mm/gmap.c44
-rw-r--r--arch/um/drivers/mconsole_kern.c3
-rw-r--r--arch/x86/entry/entry_32.S3
-rw-r--r--arch/x86/entry/entry_64.S3
-rw-r--r--arch/x86/hyperv/hv_init.c123
-rw-r--r--arch/x86/include/asm/cpufeatures.h1
-rw-r--r--arch/x86/include/asm/hardirq.h3
-rw-r--r--arch/x86/include/asm/irq_vectors.h7
-rw-r--r--arch/x86/include/asm/kvm_host.h22
-rw-r--r--arch/x86/include/asm/mshyperv.h32
-rw-r--r--arch/x86/include/asm/msr-index.h2
-rw-r--r--arch/x86/include/asm/pat.h2
-rw-r--r--arch/x86/include/asm/svm.h3
-rw-r--r--arch/x86/include/uapi/asm/hyperv.h27
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h4
-rw-r--r--arch/x86/kernel/acpi/boot.c3
-rw-r--r--arch/x86/kernel/apm_32.c1
-rw-r--r--arch/x86/kernel/cpu/amd.c66
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c6
-rw-r--r--arch/x86/kernel/cpu/scattered.c1
-rw-r--r--arch/x86/kernel/irq.c9
-rw-r--r--arch/x86/kernel/kvm.c49
-rw-r--r--arch/x86/kvm/Kconfig8
-rw-r--r--arch/x86/kvm/cpuid.c22
-rw-r--r--arch/x86/kvm/emulate.c62
-rw-r--r--arch/x86/kvm/irq.c2
-rw-r--r--arch/x86/kvm/lapic.c25
-rw-r--r--arch/x86/kvm/lapic.h4
-rw-r--r--arch/x86/kvm/mmu.c26
-rw-r--r--arch/x86/kvm/mmu_audit.c2
-rw-r--r--arch/x86/kvm/svm.c1199
-rw-r--r--arch/x86/kvm/vmx.c756
-rw-r--r--arch/x86/kvm/vmx_shadow_fields.h77
-rw-r--r--arch/x86/kvm/x86.c338
-rw-r--r--arch/x86/kvm/x86.h33
-rw-r--r--arch/x86/mm/pat.c19
-rw-r--r--arch/x86/xen/p2m.c6
-rw-r--r--arch/x86/xen/xen-head.S16
-rw-r--r--drivers/acpi/Kconfig7
-rw-r--r--drivers/acpi/acpi_video.c2
-rw-r--r--drivers/acpi/acpica/acapps.h4
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acconvert.h2
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h2
-rw-r--r--drivers/acpi/acpica/acglobal.h2
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h2
-rw-r--r--drivers/acpi/acpica/aclocal.h2
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h2
-rw-r--r--drivers/acpi/acpica/acopcode.h2
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h2
-rw-r--r--drivers/acpi/acpica/acresrc.h2
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h2
-rw-r--r--drivers/acpi/acpica/amlcode.h2
-rw-r--r--drivers/acpi/acpica/amlresrc.h2
-rw-r--r--drivers/acpi/acpica/dbcmds.c2
-rw-r--r--drivers/acpi/acpica/dbconvert.c2
-rw-r--r--drivers/acpi/acpica/dbdisply.c2
-rw-r--r--drivers/acpi/acpica/dbexec.c2
-rw-r--r--drivers/acpi/acpica/dbfileio.c2
-rw-r--r--drivers/acpi/acpica/dbhistry.c2
-rw-r--r--drivers/acpi/acpica/dbinput.c2
-rw-r--r--drivers/acpi/acpica/dbmethod.c2
-rw-r--r--drivers/acpi/acpica/dbnames.c2
-rw-r--r--drivers/acpi/acpica/dbobject.c2
-rw-r--r--drivers/acpi/acpica/dbstats.c2
-rw-r--r--drivers/acpi/acpica/dbtest.c2
-rw-r--r--drivers/acpi/acpica/dbutils.c2
-rw-r--r--drivers/acpi/acpica/dbxface.c2
-rw-r--r--drivers/acpi/acpica/dsargs.c2
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsdebug.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dspkginit.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evglock.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c2
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evhandler.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c2
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evsci.c2
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconcat.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c2
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c2
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c2
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c2
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/extrace.c2
-rw-r--r--drivers/acpi/acpica/exutils.c2
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwesleep.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c2
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/hwxface.c2
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c2
-rw-r--r--drivers/acpi/acpica/nsarguments.c2
-rw-r--r--drivers/acpi/acpica/nsconvert.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nseval.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/acpica/nsprepkg.c2
-rw-r--r--drivers/acpi/acpica/nsrepair.c2
-rw-r--r--drivers/acpi/acpica/nsrepair2.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/psargs.c2
-rw-r--r--drivers/acpi/acpica/psloop.c2
-rw-r--r--drivers/acpi/acpica/psobject.c2
-rw-r--r--drivers/acpi/acpica/psopcode.c2
-rw-r--r--drivers/acpi/acpica/psopinfo.c2
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c2
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c2
-rw-r--r--drivers/acpi/acpica/rscreate.c2
-rw-r--r--drivers/acpi/acpica/rsdump.c2
-rw-r--r--drivers/acpi/acpica/rsdumpinfo.c2
-rw-r--r--drivers/acpi/acpica/rsinfo.c2
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c2
-rw-r--r--drivers/acpi/acpica/rslist.c2
-rw-r--r--drivers/acpi/acpica/rsmemory.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c2
-rw-r--r--drivers/acpi/acpica/rsserial.c2
-rw-r--r--drivers/acpi/acpica/rsutils.c2
-rw-r--r--drivers/acpi/acpica/rsxface.c2
-rw-r--r--drivers/acpi/acpica/tbdata.c2
-rw-r--r--drivers/acpi/acpica/tbfadt.c2
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbprint.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c2
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfload.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c2
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utascii.c2
-rw-r--r--drivers/acpi/acpica/utbuffer.c2
-rw-r--r--drivers/acpi/acpica/utcache.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c2
-rw-r--r--drivers/acpi/acpica/utdelete.c2
-rw-r--r--drivers/acpi/acpica/uterror.c2
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utexcep.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c2
-rw-r--r--drivers/acpi/acpica/uthex.c2
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c2
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c2
-rw-r--r--drivers/acpi/acpica/utnonansi.c2
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utownerid.c2
-rw-r--r--drivers/acpi/acpica/utpredef.c2
-rw-r--r--drivers/acpi/acpica/utprint.c2
-rw-r--r--drivers/acpi/acpica/utresdecode.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c2
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utstring.c2
-rw-r--r--drivers/acpi/acpica/utstrsuppt.c2
-rw-r--r--drivers/acpi/acpica/utstrtoul64.c2
-rw-r--r--drivers/acpi/acpica/uttrack.c2
-rw-r--r--drivers/acpi/acpica/utuuid.c2
-rw-r--r--drivers/acpi/acpica/utxface.c2
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/acpica/utxfinit.c2
-rw-r--r--drivers/acpi/acpica/utxfmutex.c2
-rw-r--r--drivers/acpi/battery.c16
-rw-r--r--drivers/acpi/bus.c45
-rw-r--r--drivers/acpi/cppc_acpi.c2
-rw-r--r--drivers/acpi/processor_idle.c3
-rw-r--r--drivers/acpi/processor_perflib.c2
-rw-r--r--drivers/acpi/sbshc.c4
-rw-r--r--drivers/acpi/scan.c20
-rw-r--r--drivers/acpi/spcr.c29
-rw-r--r--drivers/acpi/tables.c3
-rw-r--r--drivers/atm/he.c8
-rw-r--r--drivers/base/power/domain.c76
-rw-r--r--drivers/block/rbd.c22
-rw-r--r--drivers/block/virtio_blk.c32
-rw-r--r--drivers/cpufreq/Kconfig10
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c11
-rw-r--r--drivers/cpufreq/at32ap-cpufreq.c127
-rw-r--r--drivers/cpufreq/cpufreq.c3
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c7
-rw-r--r--drivers/cpufreq/freq_table.c8
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c1
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/cpufreq/longhaul.c4
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c6
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c6
-rw-r--r--drivers/crypto/ccp/Kconfig12
-rw-r--r--drivers/crypto/ccp/Makefile1
-rw-r--r--drivers/crypto/ccp/psp-dev.c805
-rw-r--r--drivers/crypto/ccp/psp-dev.h83
-rw-r--r--drivers/crypto/ccp/sp-dev.c35
-rw-r--r--drivers/crypto/ccp/sp-dev.h28
-rw-r--r--drivers/crypto/ccp/sp-pci.c52
-rw-r--r--drivers/firmware/psci.c55
-rw-r--r--drivers/firmware/qemu_fw_cfg.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c15
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c24
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c52
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c42
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h17
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c98
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c21
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c19
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c5
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c10
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c61
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c4
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c94
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c20
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c11
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c22
-rw-r--r--drivers/gpu/drm/i915/intel_display.c6
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c26
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c16
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c9
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c7
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c14
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c11
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c6
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c189
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c207
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.h55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c155
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.h35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c71
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h746
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h802
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h1006
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c144
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h49
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.h35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c136
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h48
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h21
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c3
-rw-r--r--drivers/iommu/amd_iommu.c24
-rw-r--r--drivers/iommu/amd_iommu_types.h2
-rw-r--r--drivers/iommu/arm-smmu-v3.c2
-rw-r--r--drivers/iommu/arm-smmu.c12
-rw-r--r--drivers/iommu/exynos-iommu.c9
-rw-r--r--drivers/iommu/intel-iommu.c5
-rw-r--r--drivers/iommu/intel-svm.c32
-rw-r--r--drivers/iommu/iommu.c5
-rw-r--r--drivers/iommu/ipmmu-vmsa.c14
-rw-r--r--drivers/iommu/msm_iommu.c16
-rw-r--r--drivers/iommu/of_iommu.c16
-rw-r--r--drivers/iommu/omap-iommu-debug.c4
-rw-r--r--drivers/iommu/qcom_iommu.c2
-rw-r--r--drivers/misc/mic/vop/vop_main.c20
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c20
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c21
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c13
-rw-r--r--drivers/net/ethernet/sun/Kconfig1
-rw-r--r--drivers/net/ethernet/sun/cassini.c1
-rw-r--r--drivers/net/ethernet/sun/cassini.h1
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c1
-rw-r--r--drivers/net/ethernet/sun/niu.c1
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c1
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c1
-rw-r--r--drivers/net/ethernet/sun/sunqe.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c16
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/tun.c15
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c43
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c40
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c52
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2_main.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h1
-rw-r--r--drivers/net/xen-netfront.c46
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c5
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c2
-rw-r--r--drivers/pcmcia/soc_common.c8
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c22
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c44
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c8
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c2
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c2
-rw-r--r--drivers/platform/x86/mlx-platform.c372
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/char/sclp_early.c3
-rw-r--r--drivers/s390/cio/chp.c10
-rw-r--r--drivers/s390/cio/cio.c2
-rw-r--r--drivers/s390/cio/cmf.c15
-rw-r--r--drivers/s390/cio/itcw.c2
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c2
-rw-r--r--drivers/s390/net/qeth_core.h7
-rw-r--r--drivers/s390/net/qeth_core_main.c14
-rw-r--r--drivers/sh/clk/core.c5
-rw-r--r--drivers/ssb/Kconfig2
-rw-r--r--drivers/staging/irda/drivers/sh_sir.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Kconfig1
-rw-r--r--drivers/staging/rtl8192u/Kconfig1
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c3
-rw-r--r--drivers/target/loopback/tcm_loop.c145
-rw-r--r--drivers/target/sbp/sbp_target.c13
-rw-r--r--drivers/target/target_core_configfs.c6
-rw-r--r--drivers/target/target_core_device.c4
-rw-r--r--drivers/target/target_core_fabric_lib.c6
-rw-r--r--drivers/target/target_core_internal.h3
-rw-r--r--drivers/target/target_core_pr.c4
-rw-r--r--drivers/target/target_core_sbc.c8
-rw-r--r--drivers/target/target_core_transport.c3
-rw-r--r--drivers/target/target_core_user.c983
-rw-r--r--drivers/tty/serial/earlycon.c15
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/scsi.c11
-rw-r--r--drivers/vhost/test.c2
-rw-r--r--drivers/vhost/vhost.c68
-rw-r--r--drivers/vhost/vhost.h9
-rw-r--r--drivers/vhost/vsock.c2
-rw-r--r--drivers/video/fbdev/Kconfig1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c2
-rw-r--r--drivers/virtio/Kconfig8
-rw-r--r--drivers/virtio/virtio.c18
-rw-r--r--drivers/virtio/virtio_balloon.c4
-rw-r--r--drivers/virtio/virtio_mmio.c6
-rw-r--r--drivers/virtio/virtio_pci_common.c8
-rw-r--r--drivers/xen/grant-table.c4
-rw-r--r--drivers/xen/pvcalls-back.c2
-rw-r--r--fs/afs/addr_list.c13
-rw-r--r--fs/afs/dir.c122
-rw-r--r--fs/afs/inode.c48
-rw-r--r--fs/afs/internal.h12
-rw-r--r--fs/afs/mntpt.c20
-rw-r--r--fs/afs/rotate.c293
-rw-r--r--fs/afs/server_list.c3
-rw-r--r--fs/afs/super.c132
-rw-r--r--fs/afs/vlclient.c10
-rw-r--r--fs/afs/volume.c47
-rw-r--r--fs/ceph/addr.c28
-rw-r--r--fs/ceph/caps.c170
-rw-r--r--fs/ceph/dir.c79
-rw-r--r--fs/ceph/file.c12
-rw-r--r--fs/ceph/inode.c56
-rw-r--r--fs/ceph/mds_client.c33
-rw-r--r--fs/ceph/mds_client.h3
-rw-r--r--fs/ceph/snap.c8
-rw-r--r--fs/ceph/super.h53
-rw-r--r--fs/cifs/cifs_debug.c9
-rw-r--r--fs/cifs/cifssmb.c4
-rw-r--r--fs/cifs/smb2pdu.h114
-rw-r--r--fs/cifs/smbdirect.c16
-rw-r--r--fs/cramfs/Kconfig3
-rw-r--r--fs/kernfs/file.c2
-rw-r--r--fs/locks.c6
-rw-r--r--fs/nfsd/nfs3xdr.c31
-rw-r--r--fs/nfsd/nfs4proc.c9
-rw-r--r--fs/nfsd/nfs4state.c6
-rw-r--r--fs/nfsd/nfs4xdr.c16
-rw-r--r--fs/nfsd/nfsfh.h28
-rw-r--r--fs/nfsd/nfsxdr.c1
-rw-r--r--fs/orangefs/dcache.c19
-rw-r--r--fs/orangefs/namei.c16
-rw-r--r--fs/orangefs/orangefs-debugfs.c4
-rw-r--r--fs/orangefs/orangefs-debugfs.h1
-rw-r--r--fs/orangefs/orangefs-kernel.h10
-rw-r--r--fs/orangefs/orangefs-utils.c93
-rw-r--r--fs/orangefs/protocol.h15
-rw-r--r--fs/orangefs/super.c8
-rw-r--r--fs/seq_file.c5
-rw-r--r--include/acpi/acbuffer.h2
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acexcep.h2
-rw-r--r--include/acpi/acnames.h2
-rw-r--r--include/acpi/acoutput.h2
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h4
-rw-r--r--include/acpi/acrestyp.h2
-rw-r--r--include/acpi/actbl.h2
-rw-r--r--include/acpi/actbl1.h2
-rw-r--r--include/acpi/actbl2.h2
-rw-r--r--include/acpi/actbl3.h2
-rw-r--r--include/acpi/actypes.h12
-rw-r--r--include/acpi/acuuid.h2
-rw-r--r--include/acpi/platform/acenv.h2
-rw-r--r--include/acpi/platform/acenvex.h2
-rw-r--r--include/acpi/platform/acgcc.h2
-rw-r--r--include/acpi/platform/acgccex.h2
-rw-r--r--include/acpi/platform/acintel.h2
-rw-r--r--include/acpi/platform/aclinux.h4
-rw-r--r--include/acpi/platform/aclinuxex.h2
-rw-r--r--include/kvm/arm_arch_timer.h2
-rw-r--r--include/kvm/arm_psci.h51
-rw-r--r--include/kvm/arm_vgic.h13
-rw-r--r--include/linux/acpi.h7
-rw-r--r--include/linux/arm-smccc.h165
-rw-r--r--include/linux/cpufreq.h125
-rw-r--r--include/linux/etherdevice.h2
-rw-r--r--include/linux/idr.h174
-rw-r--r--include/linux/intel-iommu.h2
-rw-r--r--include/linux/kvm_host.h14
-rw-r--r--include/linux/of_iommu.h5
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/psci.h13
-rw-r--r--include/linux/psp-sev.h606
-rw-r--r--include/linux/ptr_ring.h15
-rw-r--r--include/linux/radix-tree.h17
-rw-r--r--include/linux/serial_core.h4
-rw-r--r--include/linux/sunrpc/sched.h3
-rw-r--r--include/linux/sunrpc/svc_rdma.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h5
-rw-r--r--include/net/netfilter/nf_flow_table.h6
-rw-r--r--include/net/tcp.h8
-rw-r--r--include/target/target_core_backend.h1
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/uapi/linux/kvm.h90
-rw-r--r--include/uapi/linux/psci.h3
-rw-r--r--include/uapi/linux/psp-sev.h142
-rw-r--r--include/uapi/linux/virtio_balloon.h3
-rw-r--r--kernel/bpf/sockmap.c187
-rw-r--r--kernel/configs/kvm_guest.config1
-rw-r--r--kernel/trace/ftrace.c1
-rw-r--r--kernel/trace/trace_events_filter.c9
-rw-r--r--lib/Kconfig.debug1
-rw-r--r--lib/idr.c255
-rw-r--r--lib/radix-tree.c3
-rw-r--r--lib/test_bpf.c31
-rw-r--r--net/ceph/ceph_common.c4
-rw-r--r--net/core/rtnetlink.c48
-rw-r--r--net/core/skbuff.c4
-rw-r--r--net/dns_resolver/dns_query.c22
-rw-r--r--net/ipv4/netfilter/Kconfig3
-rw-r--r--net/ipv4/netfilter/nf_flow_table_ipv4.c1
-rw-r--r--net/ipv4/tcp_ipv4.c3
-rw-r--r--net/ipv4/tcp_ulp.c59
-rw-r--r--net/ipv6/netfilter/Kconfig3
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c1
-rw-r--r--net/ipv6/netfilter/nf_flow_table_ipv6.c1
-rw-r--r--net/ipv6/route.c8
-rw-r--r--net/ipv6/tcp_ipv6.c3
-rw-r--r--net/mpls/af_mpls.c24
-rw-r--r--net/netfilter/Kconfig8
-rw-r--r--net/netfilter/nf_flow_table.c76
-rw-r--r--net/netfilter/nf_flow_table_inet.c1
-rw-r--r--net/netfilter/nf_tables_api.c17
-rw-r--r--net/netfilter/nft_flow_offload.c24
-rw-r--r--net/netfilter/x_tables.c7
-rw-r--r--net/netfilter/xt_RATEEST.c22
-rw-r--r--net/netfilter/xt_cgroup.c1
-rw-r--r--net/netlink/genetlink.c12
-rw-r--r--net/rds/cong.c2
-rw-r--r--net/rds/connection.c15
-rw-r--r--net/rds/ib.c17
-rw-r--r--net/rds/ib_cm.c1
-rw-r--r--net/rds/rds.h7
-rw-r--r--net/rds/send.c10
-rw-r--r--net/rds/tcp.c42
-rw-r--r--net/rds/tcp_connect.c2
-rw-r--r--net/rds/tcp_recv.c2
-rw-r--r--net/rds/tcp_send.c2
-rw-r--r--net/rds/threads.c6
-rw-r--r--net/rxrpc/conn_client.c3
-rw-r--r--net/rxrpc/conn_event.c1
-rw-r--r--net/rxrpc/conn_object.c16
-rw-r--r--net/rxrpc/rxkad.c92
-rw-r--r--net/sched/act_api.c72
-rw-r--r--net/sched/cls_api.c8
-rw-r--r--net/sched/cls_basic.c33
-rw-r--r--net/sched/cls_bpf.c30
-rw-r--r--net/sched/cls_flower.c34
-rw-r--r--net/sched/cls_u32.c50
-rw-r--r--net/sched/sch_netem.c2
-rw-r--r--net/sctp/sm_make_chunk.c7
-rw-r--r--net/sunrpc/sched.c16
-rw-r--r--net/sunrpc/svcsock.c14
-rw-r--r--net/sunrpc/xprt.c3
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c2
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c5
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c9
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c12
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c6
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c25
-rw-r--r--net/sunrpc/xprtrdma/verbs.c8
-rw-r--r--net/sunrpc/xprtsock.c27
-rw-r--r--net/tipc/msg.c4
-rw-r--r--net/tls/tls_main.c2
-rw-r--r--scripts/coccinelle/free/devm_free.cocci55
-rw-r--r--scripts/coccinelle/null/deref_null.cocci6
-rw-r--r--scripts/gcc-plugins/gcc-common.h4
-rw-r--r--scripts/gcc-plugins/latent_entropy_plugin.c17
-rw-r--r--scripts/gcc-plugins/randomize_layout_plugin.c75
-rw-r--r--scripts/gcc-plugins/structleak_plugin.c19
-rw-r--r--scripts/kconfig/.gitignore1
-rw-r--r--scripts/kconfig/conf.c40
-rw-r--r--scripts/kconfig/confdata.c2
-rw-r--r--scripts/kconfig/expr.c4
-rw-r--r--scripts/kconfig/lkc.h1
-rw-r--r--scripts/kconfig/lkc_proto.h2
-rw-r--r--scripts/kconfig/nconf.gui.c3
-rw-r--r--scripts/kconfig/symbol.c22
-rw-r--r--scripts/kconfig/util.c15
-rw-r--r--scripts/kconfig/zconf.l29
-rw-r--r--scripts/kconfig/zconf.y6
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile5
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst4
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst3
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool72
-rw-r--r--tools/include/uapi/linux/bpf_common.h7
-rw-r--r--tools/lib/bpf/libbpf.c56
-rw-r--r--tools/power/acpi/common/cmfsize.c2
-rw-r--r--tools/power/acpi/common/getopt.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixdir.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixmap.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixxf.c2
-rw-r--r--tools/power/acpi/tools/acpidump/acpidump.h2
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c2
-rw-r--r--tools/testing/radix-tree/idr-test.c29
-rw-r--r--tools/testing/radix-tree/linux/kernel.h2
-rw-r--r--tools/testing/selftests/bpf/Makefile12
-rwxr-xr-xtools/testing/selftests/bpf/test_kmod.sh18
-rwxr-xr-xtools/testing/selftests/bpf/test_libbpf.sh49
-rw-r--r--tools/testing/selftests/bpf/test_libbpf_open.c150
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_meta.sh1
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_redirect.sh2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc37
-rw-r--r--tools/testing/selftests/ftrace/test.d/functions10
-rw-r--r--tools/testing/selftests/net/reuseport_bpf.c21
-rw-r--r--tools/virtio/ringtest/ring.c30
-rw-r--r--tools/virtio/ringtest/virtio_ring_0_9.c24
-rw-r--r--virt/kvm/Kconfig3
-rw-r--r--virt/kvm/arm/arch_timer.c138
-rw-r--r--virt/kvm/arm/arm.c155
-rw-r--r--virt/kvm/arm/hyp/vgic-v2-sr.c1
-rw-r--r--virt/kvm/arm/mmu.c64
-rw-r--r--virt/kvm/arm/psci.c143
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c4
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c115
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c29
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c29
-rw-r--r--virt/kvm/arm/vgic/vgic.c41
-rw-r--r--virt/kvm/arm/vgic/vgic.h8
-rw-r--r--virt/kvm/kvm_main.c62
830 files changed, 16441 insertions, 6481 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 39ac9d4fad7f..1d1d53f85ddd 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -931,9 +931,12 @@
earlycon= [KNL] Output early console device and options.
- When used with no options, the early console is
- determined by the stdout-path property in device
- tree's chosen node.
+ [ARM64] The early console is determined by the
+ stdout-path property in the device tree's chosen
+ node, or by the ACPI SPCR table.
+
+ [X86] When used with no options, the early console
+ is determined by the ACPI SPCR table.
cdns,<addr>[,options]
Start an early, polled-mode console on a Cadence
diff --git a/Documentation/core-api/idr.rst b/Documentation/core-api/idr.rst
new file mode 100644
index 000000000000..9078a5c3ac95
--- /dev/null
+++ b/Documentation/core-api/idr.rst
@@ -0,0 +1,79 @@
+.. SPDX-License-Identifier: CC-BY-SA-4.0
+
+=============
+ID Allocation
+=============
+
+:Author: Matthew Wilcox
+
+Overview
+========
+
+A common problem to solve is allocating identifiers (IDs); generally
+small numbers which identify a thing. Examples include file descriptors,
+process IDs, packet identifiers in networking protocols, SCSI tags
+and device instance numbers. The IDR and the IDA provide a reasonable
+solution to the problem to avoid everybody inventing their own. The IDR
+provides the ability to map an ID to a pointer, while the IDA provides
+only ID allocation, and as a result is much more memory-efficient.
+
+IDR usage
+=========
+
+Start by initialising an IDR, either with :c:func:`DEFINE_IDR`
+for statically allocated IDRs or :c:func:`idr_init` for dynamically
+allocated IDRs.
+
+You can call :c:func:`idr_alloc` to allocate an unused ID. Look up
+the pointer you associated with the ID by calling :c:func:`idr_find`
+and free the ID by calling :c:func:`idr_remove`.
+
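+As an illustrative sketch of this basic pattern (``struct my_obj``,
+``my_objects`` and the helper functions are hypothetical, and locking
+is omitted)::
+
+	static DEFINE_IDR(my_objects);
+
+	int store_obj(struct my_obj *obj)
+	{
+		int id = idr_alloc(&my_objects, obj, 0, 0, GFP_KERNEL);
+
+		if (id < 0)
+			return id;	/* -ENOMEM or -ENOSPC */
+		obj->id = id;
+		return 0;
+	}
+
+	struct my_obj *lookup_obj(int id)
+	{
+		return idr_find(&my_objects, id);
+	}
+
+	void release_obj(struct my_obj *obj)
+	{
+		idr_remove(&my_objects, obj->id);
+		kfree(obj);
+	}
+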
+If you need to change the pointer associated with an ID, you can call
+:c:func:`idr_replace`. One common reason to do this is to reserve an
+ID by passing a ``NULL`` pointer to the allocation function; initialise the
+object with the reserved ID and finally insert the initialised object
+into the IDR.
+
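+A short sketch of that reservation pattern (``create_obj()`` is a
+hypothetical constructor)::
+
+	int id = idr_alloc(&my_objects, NULL, 0, 0, GFP_KERNEL);
+
+	if (id >= 0) {
+		struct my_obj *obj = create_obj(id);	/* initialise with the reserved ID */
+
+		idr_replace(&my_objects, obj, id);	/* publish the object */
+	}
+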
+Some users need to allocate IDs larger than ``INT_MAX``. So far all of
+these users have been content with a ``UINT_MAX`` limit, and they use
+:c:func:`idr_alloc_u32`. If you need IDs that will not fit in a u32,
+we will work with you to address your needs.
+
+If you need to allocate IDs sequentially, you can use
+:c:func:`idr_alloc_cyclic`. The IDR becomes less efficient when dealing
+with larger IDs, so using this function comes at a slight cost.
+
+To perform an action on all pointers used by the IDR, you can
+either use the callback-based :c:func:`idr_for_each` or the
+iterator-style :c:func:`idr_for_each_entry`. You may need to use
+:c:func:`idr_for_each_entry_continue` to continue an iteration. You can
+also use :c:func:`idr_get_next` if the iterator doesn't fit your needs.
+
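+For example, to walk every object in the hypothetical ``my_objects``
+IDR from above::
+
+	struct my_obj *obj;
+	int id;
+
+	idr_for_each_entry(&my_objects, obj, id)
+		pr_debug("ID %d -> %p\n", id, obj);
+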
+When you have finished using an IDR, you can call :c:func:`idr_destroy`
+to release the memory used by the IDR. This will not free the objects
+pointed to from the IDR; if you want to do that, use one of the iterators
+to do it.
+
+You can use :c:func:`idr_is_empty` to find out whether there are any
+IDs currently allocated.
+
+If you need to take a lock while allocating a new ID from the IDR,
+you may need to pass a restrictive set of GFP flags, which can lead
+to the IDR being unable to allocate memory. To work around this,
+you can call :c:func:`idr_preload` before taking the lock, and then
+:c:func:`idr_preload_end` after the allocation.
+
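+A minimal sketch of this pattern, assuming a hypothetical spinlock
+``my_lock`` protects the IDR::
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&my_lock);
+
+	id = idr_alloc(&my_objects, obj, 0, 0, GFP_NOWAIT);
+
+	spin_unlock(&my_lock);
+	idr_preload_end();
+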
+.. kernel-doc:: include/linux/idr.h
+ :doc: idr sync
+
+IDA usage
+=========
+
+.. kernel-doc:: lib/idr.c
+ :doc: IDA description
+
+Functions and structures
+========================
+
+.. kernel-doc:: include/linux/idr.h
+.. kernel-doc:: lib/idr.c
diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index 1b1fd01990b5..c670a8031786 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -16,6 +16,7 @@ Core utilities
atomic_ops
refcount-vs-atomic
cpu_hotplug
+ idr
local_ops
workqueue
genericirq
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index e7fadf02c511..ff335f8aeb39 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -103,18 +103,6 @@ CRC Functions
.. kernel-doc:: lib/crc-itu-t.c
:export:
-idr/ida Functions
------------------
-
-.. kernel-doc:: include/linux/idr.h
- :doc: idr sync
-
-.. kernel-doc:: lib/idr.c
- :doc: IDA description
-
-.. kernel-doc:: lib/idr.c
- :export:
-
Math Functions in Linux
=======================
diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt
index 434c49cc7330..61546ac578d6 100644
--- a/Documentation/cpu-freq/cpu-drivers.txt
+++ b/Documentation/cpu-freq/cpu-drivers.txt
@@ -291,3 +291,7 @@ For example:
/* Do something with pos */
pos->frequency = ...
}
+
+If you need to work with the position of pos within driver_freq_table,
+do not subtract the pointers, as it is quite costly. Instead, use the
+macros cpufreq_for_each_entry_idx() and cpufreq_for_each_valid_entry_idx().
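+
+For example (an illustrative sketch; the loop body is hypothetical):
+
+	struct cpufreq_frequency_table *pos;
+	int idx;
+
+	cpufreq_for_each_valid_entry_idx(pos, driver_freq_table, idx) {
+		/* idx is the position of pos within driver_freq_table */
+		pr_debug("entry %d: %u kHz\n", idx, pos->frequency);
+	}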
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
index 857df929a654..1fd5d69647ca 100644
--- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
@@ -16,6 +16,9 @@ Required Properties:
- "renesas,ipmmu-r8a7793" for the R8A7793 (R-Car M2-N) IPMMU.
- "renesas,ipmmu-r8a7794" for the R8A7794 (R-Car E2) IPMMU.
- "renesas,ipmmu-r8a7795" for the R8A7795 (R-Car H3) IPMMU.
+ - "renesas,ipmmu-r8a7796" for the R8A7796 (R-Car M3-W) IPMMU.
+ - "renesas,ipmmu-r8a77970" for the R8A77970 (R-Car V3M) IPMMU.
+ - "renesas,ipmmu-r8a77995" for the R8A77995 (R-Car D3) IPMMU.
- "renesas,ipmmu-vmsa" for generic R-Car Gen2 VMSA-compatible IPMMU.
- reg: Base address and size of the IPMMU registers.
diff --git a/Documentation/driver-api/s390-drivers.rst b/Documentation/driver-api/s390-drivers.rst
index ecf8851d3565..30e6aa7e160b 100644
--- a/Documentation/driver-api/s390-drivers.rst
+++ b/Documentation/driver-api/s390-drivers.rst
@@ -22,9 +22,28 @@ While most I/O devices on a s390 system are typically driven through the
channel I/O mechanism described here, there are various other methods
(like the diag interface). These are out of the scope of this document.
+The s390 common I/O layer also provides access to some devices that are
+not strictly considered I/O devices. They are covered here as well,
+although they are not the focus of this document.
+
Some additional information can also be found in the kernel source under
Documentation/s390/driver-model.txt.
+The css bus
+===========
+
+The css bus contains the subchannels available on the system. They fall
+into several categories:
+
+* Standard I/O subchannels, for use by the system. They have a child
+ device on the ccw bus and are described below.
+* I/O subchannels bound to the vfio-ccw driver. See
+ Documentation/s390/vfio-ccw.txt.
+* Message subchannels. No Linux driver currently exists.
+* CHSC subchannels (at most one). The chsc subchannel driver can be used
+ to send asynchronous chsc commands.
+* eADM subchannels. Used for talking to storage class memory.
+
The ccw bus
===========
@@ -102,10 +121,15 @@ ccw group devices
Generic interfaces
==================
-Some interfaces are available to other drivers that do not necessarily
-have anything to do with the busses described above, but still are
-indirectly using basic infrastructure in the common I/O layer. One
-example is the support for adapter interrupts.
+The following section describes interfaces used not only by drivers
+dealing with ccw devices, but also by drivers for various other s390
+hardware.
+
+Adapter interrupts
+------------------
+
+The common I/O layer provides helper functions for dealing with adapter
+interrupts and interrupt vectors.
.. kernel-doc:: drivers/s390/cio/airq.c
:export:
diff --git a/Documentation/filesystems/afs.txt b/Documentation/filesystems/afs.txt
index ba99b5ac4fd8..c5254f6d234d 100644
--- a/Documentation/filesystems/afs.txt
+++ b/Documentation/filesystems/afs.txt
@@ -7,6 +7,7 @@ Contents:
- Overview.
- Usage.
- Mountpoints.
+ - Dynamic root.
- Proc filesystem.
- The cell database.
- Security.
@@ -127,6 +128,22 @@ mounted on /afs in one go by doing:
umount /afs
+============
+DYNAMIC ROOT
+============
+
+A mount option is available to create a serverless mount that is only usable
+for dynamic lookup. Creating such a mount can be done by, for example:
+
+ mount -t afs none /afs -o dyn
+
+This creates a mount that just has an empty directory at the root. Attempting
+to look up a name in this directory will cause a mountpoint to be created that
+looks up a cell of the same name, for example:
+
+ ls /afs/grand.central.org/
+
+
===============
PROC FILESYSTEM
===============
diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
index 69fe1a8b7ad1..3da73aabff5a 100644
--- a/Documentation/virtual/kvm/00-INDEX
+++ b/Documentation/virtual/kvm/00-INDEX
@@ -26,3 +26,6 @@ s390-diag.txt
- Diagnose hypercall description (for IBM S/390)
timekeeping.txt
- timekeeping virtualization for x86-based architectures.
+amd-memory-encryption.txt
+ - notes on AMD Secure Encrypted Virtualization feature and SEV firmware
+ command description
diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virtual/kvm/amd-memory-encryption.rst
new file mode 100644
index 000000000000..71d6d257074f
--- /dev/null
+++ b/Documentation/virtual/kvm/amd-memory-encryption.rst
@@ -0,0 +1,247 @@
+======================================
+Secure Encrypted Virtualization (SEV)
+======================================
+
+Overview
+========
+
+Secure Encrypted Virtualization (SEV) is a feature found on AMD processors.
+
+SEV is an extension to the AMD-V architecture which supports running
+virtual machines (VMs) under the control of a hypervisor. When enabled,
+the memory contents of a VM will be transparently encrypted with a key
+unique to that VM.
+
+The hypervisor can determine SEV support through the CPUID
+instruction. The CPUID function 0x8000001f reports information related
+to SEV::
+
+ 0x8000001f[eax]:
+ Bit[1] indicates support for SEV
+ ...
+ [ecx]:
+ Bits[31:0] Number of encrypted guests supported simultaneously
+
+If support for SEV is present, MSR 0xc001_0010 (MSR_K8_SYSCFG) and MSR 0xc001_0015
+(MSR_K7_HWCR) can be used to determine if it can be enabled::
+
+ 0xc001_0010:
+ Bit[23] 1 = memory encryption can be enabled
+ 0 = memory encryption can not be enabled
+
+ 0xc001_0015:
+ Bit[0] 1 = memory encryption can be enabled
+ 0 = memory encryption can not be enabled
+
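+For illustration, a minimal userspace sketch of the CPUID part of this
+detection (assuming GCC's ``cpuid.h``; the MSR checks are omitted as
+they require ring-0 or the msr driver)::
+
+	#include <cpuid.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int eax, ebx, ecx, edx;
+
+		if (!__get_cpuid_count(0x8000001f, 0, &eax, &ebx, &ecx, &edx))
+			return 1;	/* leaf not available */
+
+		printf("SEV supported: %s\n", (eax & (1 << 1)) ? "yes" : "no");
+		printf("Simultaneous encrypted guests: %u\n", ecx);
+		return 0;
+	}
+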
+When SEV support is available, it can be enabled in a specific VM by
+setting the SEV bit before executing VMRUN::
+
+ VMCB[0x90]:
+ Bit[1] 1 = SEV is enabled
+ 0 = SEV is disabled
+
+SEV hardware uses ASIDs to associate a memory encryption key with a VM.
+Hence, the ASID for SEV-enabled guests must be from 1 to a maximum value
+defined in the CPUID 0x8000001f[ecx] field.
+
+SEV Key Management
+==================
+
+The SEV guest key management is handled by a separate processor called the AMD
+Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure
+key management interface to perform common hypervisor activities such as
+encrypting bootstrap code, snapshotting, migrating and debugging the guest.
+For more information, see the SEV Key Management spec [api-spec]_.
+
+KVM implements the following commands to support common lifecycle events of SEV
+guests, such as launching, running, snapshotting, migrating and decommissioning.
+
+1. KVM_SEV_INIT
+---------------
+
+The KVM_SEV_INIT command is used by the hypervisor to initialize the SEV platform
+context. In a typical workflow, this command should be the first command issued.
+
+Returns: 0 on success, negative error code on failure
+
+2. KVM_SEV_LAUNCH_START
+-----------------------
+
+The KVM_SEV_LAUNCH_START command is used for creating the memory encryption
+context. To create the encryption context, the user must provide a guest policy,
+the owner's public Diffie-Hellman (PDH) key and session information.
+
+Parameters: struct kvm_sev_launch_start (in/out)
+
+Returns: 0 on success, negative error code on failure
+
+::
+
+ struct kvm_sev_launch_start {
+ __u32 handle; /* if zero then firmware creates a new handle */
+ __u32 policy; /* guest's policy */
+
+ __u64 dh_uaddr; /* userspace address pointing to the guest owner's PDH key */
+ __u32 dh_len;
+
+ __u64 session_addr; /* userspace address which points to the guest session information */
+ __u32 session_len;
+ };
+
+On success, the 'handle' field contains the new handle; on error, a
+negative value is returned.
+
+For more details, see SEV spec Section 6.2.
+
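+As a hedged sketch, the structure is passed through the ``data`` field
+of ``struct kvm_sev_cmd`` when issuing the KVM_MEMORY_ENCRYPT_OP ioctl
+(``vm_fd`` and ``sev_fd`` are assumed open handles to the VM and to
+/dev/sev)::
+
+	struct kvm_sev_launch_start start = {
+		.policy = 0,	/* no policy restrictions; debugging allowed */
+	};
+	struct kvm_sev_cmd cmd = {
+		.id = KVM_SEV_LAUNCH_START,
+		.data = (__u64)&start,
+		.sev_fd = sev_fd,
+	};
+
+	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) == 0)
+		use_handle(start.handle);	/* hypothetical consumer of the new handle */
+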
+3. KVM_SEV_LAUNCH_UPDATE_DATA
+-----------------------------
+
+The KVM_SEV_LAUNCH_UPDATE_DATA is used for encrypting a memory region. It also
+calculates a measurement of the memory contents. The measurement is a signature
+of the memory contents that can be sent to the guest owner as an attestation
+that the memory was encrypted correctly by the firmware.
+
+Parameters (in): struct kvm_sev_launch_update_data
+
+Returns: 0 on success, negative error code on failure
+
+::
+
+	struct kvm_sev_launch_update_data {
+ __u64 uaddr; /* userspace address to be encrypted (must be 16-byte aligned) */
+ __u32 len; /* length of the data to be encrypted (must be 16-byte aligned) */
+ };
+
+For more details, see SEV spec Section 6.3.
+
+4. KVM_SEV_LAUNCH_MEASURE
+-------------------------
+
+The KVM_SEV_LAUNCH_MEASURE command is used to retrieve the measurement of the
+data encrypted by the KVM_SEV_LAUNCH_UPDATE_DATA command. The guest owner may
+wait to provide the guest with confidential information until it can verify the
+measurement. Since the guest owner knows the initial contents of the guest at
+boot, the measurement can be verified by comparing it to what the guest owner
+expects.
+
+Parameters (in): struct kvm_sev_launch_measure
+
+Returns: 0 on success, negative error code on failure
+
+::
+
+ struct kvm_sev_launch_measure {
+ __u64 uaddr; /* where to copy the measurement */
+ __u32 len; /* length of measurement blob */
+ };
+
+For more details on the measurement verification flow, see SEV spec Section 6.4.
+
+5. KVM_SEV_LAUNCH_FINISH
+------------------------
+
+After completion of the launch flow, the KVM_SEV_LAUNCH_FINISH command can be
+issued to make the guest ready for execution.
+
+Returns: 0 on success, negative error code on failure
+
+6. KVM_SEV_GUEST_STATUS
+-----------------------
+
+The KVM_SEV_GUEST_STATUS command is used to retrieve status information about a
+SEV-enabled guest.
+
+Parameters (out): struct kvm_sev_guest_status
+
+Returns: 0 on success, negative error code on failure
+
+::
+
+ struct kvm_sev_guest_status {
+ __u32 handle; /* guest handle */
+ __u32 policy; /* guest policy */
+ __u8 state; /* guest state (see enum below) */
+ };
+
+SEV guest state:
+
+::
+
+ enum {
+	SEV_STATE_INVALID = 0,
+ SEV_STATE_LAUNCHING, /* guest is currently being launched */
+ SEV_STATE_SECRET, /* guest is being launched and ready to accept the ciphertext data */
+ SEV_STATE_RUNNING, /* guest is fully launched and running */
+ SEV_STATE_RECEIVING, /* guest is being migrated in from another SEV machine */
+ SEV_STATE_SENDING /* guest is getting migrated out to another SEV machine */
+ };
+
+7. KVM_SEV_DBG_DECRYPT
+----------------------
+
+The KVM_SEV_DBG_DECRYPT command can be used by the hypervisor to request the
+firmware to decrypt the data at the given memory region.
+
+Parameters (in): struct kvm_sev_dbg
+
+Returns: 0 on success, negative error code on failure
+
+::
+
+ struct kvm_sev_dbg {
+ __u64 src_uaddr; /* userspace address of data to decrypt */
+ __u64 dst_uaddr; /* userspace address of destination */
+ __u32 len; /* length of memory region to decrypt */
+ };
+
+The command returns an error if the guest policy does not allow debugging.
+
+8. KVM_SEV_DBG_ENCRYPT
+----------------------
+
+The KVM_SEV_DBG_ENCRYPT command can be used by the hypervisor to request the
+firmware to encrypt the data at the given memory region.
+
+Parameters (in): struct kvm_sev_dbg
+
+Returns: 0 on success, negative error code on failure
+
+::
+
+ struct kvm_sev_dbg {
+ __u64 src_uaddr; /* userspace address of data to encrypt */
+ __u64 dst_uaddr; /* userspace address of destination */
+ __u32 len; /* length of memory region to encrypt */
+ };
+
+The command returns an error if the guest policy does not allow debugging.
+
+9. KVM_SEV_LAUNCH_SECRET
+------------------------
+
+The KVM_SEV_LAUNCH_SECRET command can be used by the hypervisor to inject secret
+data after the measurement has been validated by the guest owner.
+
+Parameters (in): struct kvm_sev_launch_secret
+
+Returns: 0 on success, negative error code on failure
+
+::
+
+ struct kvm_sev_launch_secret {
+ __u64 hdr_uaddr; /* userspace address containing the packet header */
+ __u32 hdr_len;
+
+ __u64 guest_uaddr; /* the guest memory region where the secret should be injected */
+ __u32 guest_len;
+
+ __u64 trans_uaddr; /* the hypervisor memory region which contains the secret */
+ __u32 trans_len;
+ };
+
+References
+==========
+
+.. [white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf
+.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+.. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34)
+.. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index fc3ae951bc07..792fa8717d13 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1841,6 +1841,7 @@ registers, find a list below:
PPC | KVM_REG_PPC_DBSR | 32
PPC | KVM_REG_PPC_TIDR | 64
PPC | KVM_REG_PPC_PSSCR | 64
+ PPC | KVM_REG_PPC_DEC_EXPIRY | 64
PPC | KVM_REG_PPC_TM_GPR0 | 64
...
PPC | KVM_REG_PPC_TM_GPR31 | 64
@@ -3403,7 +3404,7 @@ invalid, if invalid pages are written to (e.g. after the end of memory)
or if no page table is present for the addresses (e.g. when using
hugepages).
-4.108 KVM_PPC_GET_CPU_CHAR
+4.109 KVM_PPC_GET_CPU_CHAR
Capability: KVM_CAP_PPC_GET_CPU_CHAR
Architectures: powerpc
@@ -3449,6 +3450,57 @@ array bounds check and the array access.
These fields use the same bit definitions as the new
H_GET_CPU_CHARACTERISTICS hypercall.
+4.110 KVM_MEMORY_ENCRYPT_OP
+
+Capability: basic
+Architectures: x86
+Type: vm ioctl
+Parameters: an opaque platform specific structure (in/out)
+Returns: 0 on success; -1 on error
+
+If the platform supports creating encrypted VMs then this ioctl can be used
+for issuing platform-specific memory encryption commands to manage those
+encrypted VMs.
+
+Currently, this ioctl is used for issuing Secure Encrypted Virtualization
+(SEV) commands on AMD Processors. The SEV commands are defined in
+Documentation/virtual/kvm/amd-memory-encryption.txt.
+
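+As an illustrative sketch (vm_fd is the VM file descriptor and sev_fd
+an open handle to /dev/sev; both are assumptions of this example),
+initializing the SEV platform context could look like:
+
+	struct kvm_sev_cmd cmd = {
+		.id = KVM_SEV_INIT,
+		.sev_fd = sev_fd,
+	};
+
+	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) < 0)
+		fprintf(stderr, "SEV_INIT failed, fw error %u\n", cmd.error);
+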
+4.111 KVM_MEMORY_ENCRYPT_REG_REGION
+
+Capability: basic
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_enc_region (in)
+Returns: 0 on success; -1 on error
+
+This ioctl can be used to register a guest memory region which may
+contain encrypted data (e.g. guest RAM, SMRAM etc).
+
+It is used with SEV-enabled guests. When encryption is enabled, a guest
+memory region may contain encrypted data. The SEV memory encryption
+engine uses a tweak such that two identical plaintext pages at
+different locations will have differing ciphertexts. Swapping or moving
+the ciphertext of those pages will therefore not result in the
+plaintext being swapped, so relocating (or migrating) physical backing
+pages for the SEV guest requires some additional steps.
+
+Note: The current SEV key management spec does not provide commands to
+swap or migrate (move) ciphertext pages. Hence, for now we pin the guest
+memory region registered with the ioctl.
+
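+As a sketch (guest_ram and guest_ram_size are hypothetical names for
+the userspace backing of guest memory):
+
+	struct kvm_enc_region region = {
+		.addr = (__u64)guest_ram,
+		.size = guest_ram_size,
+	};
+
+	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region);
+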
+4.112 KVM_MEMORY_ENCRYPT_UNREG_REGION
+
+Capability: basic
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_enc_region (in)
+Returns: 0 on success; -1 on error
+
+This ioctl can be used to unregister the guest memory region registered
+with KVM_MEMORY_ENCRYPT_REG_REGION ioctl above.
+
+
5. The kvm_run structure
------------------------
diff --git a/Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt b/Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt
deleted file mode 100644
index 38bca2835278..000000000000
--- a/Documentation/virtual/kvm/arm/vgic-mapped-irqs.txt
+++ /dev/null
@@ -1,187 +0,0 @@
-KVM/ARM VGIC Forwarded Physical Interrupts
-==========================================
-
-The KVM/ARM code implements software support for the ARM Generic
-Interrupt Controller's (GIC's) hardware support for virtualization by
-allowing software to inject virtual interrupts to a VM, which the guest
-OS sees as regular interrupts. The code is famously known as the VGIC.
-
-Some of these virtual interrupts, however, correspond to physical
-interrupts from real physical devices. One example could be the
-architected timer, which itself supports virtualization, and therefore
-lets a guest OS program the hardware device directly to raise an
-interrupt at some point in time. When such an interrupt is raised, the
-host OS initially handles the interrupt and must somehow signal this
-event as a virtual interrupt to the guest. Another example could be a
-passthrough device, where the physical interrupts are initially handled
-by the host, but the device driver for the device lives in the guest OS
-and KVM must therefore somehow inject a virtual interrupt on behalf of
-the physical one to the guest OS.
-
-These virtual interrupts corresponding to a physical interrupt on the
-host are called forwarded physical interrupts, but are also sometimes
-referred to as 'virtualized physical interrupts' and 'mapped interrupts'.
-
-Forwarded physical interrupts are handled slightly differently compared
-to virtual interrupts generated purely by a software emulated device.
-
-
-The HW bit
-----------
-Virtual interrupts are signalled to the guest by programming the List
-Registers (LRs) on the GIC before running a VCPU. The LR is programmed
-with the virtual IRQ number and the state of the interrupt (Pending,
-Active, or Pending+Active). When the guest ACKs and EOIs a virtual
-interrupt, the LR state moves from Pending to Active, and finally to
-inactive.
-
-The LRs include an extra bit, called the HW bit. When this bit is set,
-KVM must also program an additional field in the LR, the physical IRQ
-number, to link the virtual with the physical IRQ.
-
-When the HW bit is set, KVM must EITHER set the Pending OR the Active
-bit, never both at the same time.
-
-Setting the HW bit causes the hardware to deactivate the physical
-interrupt on the physical distributor when the guest deactivates the
-corresponding virtual interrupt.
-
-
-Forwarded Physical Interrupts Life Cycle
-----------------------------------------
-
-The state of forwarded physical interrupts is managed in the following way:
-
- - The physical interrupt is acked by the host, and becomes active on
- the physical distributor (*).
- - KVM sets the LR.Pending bit, because this is the only way the GICV
- interface is going to present it to the guest.
- - LR.Pending will stay set as long as the guest has not acked the interrupt.
- - LR.Pending transitions to LR.Active on the guest read of the IAR, as
- expected.
- - On guest EOI, the *physical distributor* active bit gets cleared,
- but the LR.Active is left untouched (set).
- - KVM clears the LR on VM exits when the physical distributor
- active state has been cleared.
-
-(*): The host handling is slightly more complicated. For some forwarded
-interrupts (shared), KVM directly sets the active state on the physical
-distributor before entering the guest, because the interrupt is never actually
-handled on the host (see details on the timer as an example below). For other
-forwarded interrupts (non-shared) the host does not deactivate the interrupt
-when the host ISR completes, but leaves the interrupt active until the guest
-deactivates it. Leaving the interrupt active is allowed, because Linux
-configures the physical GIC with EOIMode=1, which causes EOI operations to
-perform a priority drop allowing the GIC to receive other interrupts of the
-default priority.
-
-
-Forwarded Edge and Level Triggered PPIs and SPIs
-------------------------------------------------
-Forwarded physical interrupts injected should always be active on the
-physical distributor when injected to a guest.
-
-Level-triggered interrupts will keep the interrupt line to the GIC
-asserted, typically until the guest programs the device to deassert the
-line. This means that the interrupt will remain pending on the physical
-distributor until the guest has reprogrammed the device. Since we
-always run the VM with interrupts enabled on the CPU, a pending
-interrupt will exit the guest as soon as we switch into the guest,
-preventing the guest from ever making progress as the process repeats
-over and over. Therefore, the active state on the physical distributor
-must be set when entering the guest, preventing the GIC from forwarding
-the pending interrupt to the CPU. As soon as the guest deactivates the
-interrupt, the physical line is sampled by the hardware again and the host
-takes a new interrupt if and only if the physical line is still asserted.
-
-Edge-triggered interrupts do not exhibit the same problem with
-preventing guest execution that level-triggered interrupts do. One
-option is to not use the HW bit at all, and inject edge-triggered interrupts
-from a physical device as pure virtual interrupts. But that would
-potentially slow down handling of the interrupt in the guest, because a
-physical interrupt occurring in the middle of the guest ISR would
-preempt the guest for the host to handle the interrupt. Additionally,
-if you configure the system to handle interrupts on a separate physical
-core from that running your VCPU, you still have to interrupt the VCPU
-to queue the pending state onto the LR, even though the guest won't use
-this information until the guest ISR completes. Therefore, the HW
-bit should always be set for forwarded edge-triggered interrupts. With
-the HW bit set, the virtual interrupt is injected and additional
-physical interrupts occurring before the guest deactivates the interrupt
-simply mark the state on the physical distributor as Pending+Active. As
-soon as the guest deactivates the interrupt, the host takes another
-interrupt if and only if there was a physical interrupt between injecting
-the forwarded interrupt to the guest and the guest deactivating the
-interrupt.
-
-Consequently, whenever we schedule a VCPU with one or more LRs with the
-HW bit set, the interrupt must also be active on the physical
-distributor.
-
-
-Forwarded LPIs
---------------
-LPIs, introduced in GICv3, are always edge-triggered and do not have an
-active state. They become pending when a device signals them, and as
-soon as they are acked by the CPU, they are inactive again.
-
-It therefore doesn't make sense, and is not supported, to set the HW bit
-for physical LPIs that are forwarded to a VM as virtual interrupts,
-typically virtual SPIs.
-
-For LPIs, there is no other choice than to preempt the VCPU thread if
-necessary, and queue the pending state onto the LR.
-
-
-Putting It Together: The Architected Timer
-------------------------------------------
-The architected timer is a device that signals interrupts with level
-triggered semantics. The timer hardware is directly accessed by VCPUs
-which program the timer to fire at some point in time. Each VCPU on a
-system programs the timer to fire at different times, and therefore the
-hardware is multiplexed between multiple VCPUs. This is implemented by
-context-switching the timer state along with each VCPU thread.
-
-However, this means that a scenario like the following is entirely
-possible, and in fact, typical:
-
-1. KVM runs the VCPU
-2. The guest programs the timer to fire at T+100
-3. The guest is idle and calls WFI (wait-for-interrupts)
-4. The hardware traps to the host
-5. KVM stores the timer state to memory and disables the hardware timer
-6. KVM schedules a soft timer to fire in T+(100 - time since step 2)
-7. KVM puts the VCPU thread to sleep (on a waitqueue)
-8. The soft timer fires, waking up the VCPU thread
-9. KVM reprograms the timer hardware with the VCPU's values
-10. KVM marks the timer interrupt as active on the physical distributor
-11. KVM injects a forwarded physical interrupt to the guest
-12. KVM runs the VCPU
-
-Notice that KVM injects a forwarded physical interrupt in step 11 without
-the corresponding interrupt having actually fired on the host. That is
-exactly why we mark the timer interrupt as active in step 10, because
-the active state on the physical distributor is part of the state
-belonging to the timer hardware, which is context-switched along with
-the VCPU thread.
-
-If the guest does not idle because it is busy, the flow looks like this
-instead:
-
-1. KVM runs the VCPU
-2. The guest programs the timer to fire at T+100
-3. At T+100 the timer fires and a physical IRQ causes the VM to exit
- (note that this initially only traps to EL2 and does not run the host ISR
- until KVM has returned to the host).
-4. With interrupts still disabled on the CPU coming back from the guest, KVM
- stores the virtual timer state to memory and disables the virtual hw timer.
-5. KVM looks at the timer state (in memory) and injects a forwarded physical
- interrupt because it concludes the timer has expired.
-6. KVM marks the timer interrupt as active on the physical distributor
-7. KVM enables the timer, enables interrupts, and runs the VCPU
-
-Notice that again the forwarded physical interrupt is injected to the
-guest without having actually been handled on the host. In this case it
-is because the host never actually sees the physical interrupt: the
-timer is disabled upon guest return, and the virtual forwarded interrupt is
-injected on the KVM guest entry path.
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index 3c65feb83010..dcab6dc11e3b 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -54,6 +54,10 @@ KVM_FEATURE_PV_UNHALT || 7 || guest checks this feature bit
|| || before enabling paravirtualized
|| || spinlock support.
------------------------------------------------------------------------------
+KVM_FEATURE_PV_TLB_FLUSH || 9 || guest checks this feature bit
+ || || before enabling paravirtualized
+ || || TLB flush.
+------------------------------------------------------------------------------
KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
|| || per-cpu warps are expected in
|| || kvmclock.
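
As an aside, a guest would typically gate the paravirtualized TLB flush on
this bit; a minimal kernel-style sketch (the helper name is hypothetical;
the kvm_para_*() accessors are the usual guest-side CPUID helpers):

    #include <asm/kvm_para.h>

    /* Hypothetical guest-side check for the PV TLB flush feature bit. */
    static bool pv_tlb_flush_supported(void)
    {
            return kvm_para_available() &&
                   kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH);
    }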
diff --git a/MAINTAINERS b/MAINTAINERS
index cf4258338587..3bdc260e36b7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7748,7 +7748,9 @@ F: arch/powerpc/kernel/kvm*
KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
M: Christian Borntraeger <borntraeger@de.ibm.com>
-M: Cornelia Huck <cohuck@redhat.com>
+M: Janosch Frank <frankja@linux.vnet.ibm.com>
+R: David Hildenbrand <david@redhat.com>
+R: Cornelia Huck <cohuck@redhat.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
@@ -10331,7 +10333,8 @@ F: fs/ocfs2/
ORANGEFS FILESYSTEM
M: Mike Marshall <hubcap@omnibond.com>
-L: pvfs2-developers@beowulf-underground.org (subscribers-only)
+R: Martin Brandenburg <martin@omnibond.com>
+L: devel@lists.orangefs.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git
S: Supported
F: fs/orangefs/
@@ -10799,11 +10802,9 @@ S: Maintained
F: drivers/pci/dwc/*spear*
PCMCIA SUBSYSTEM
-P: Linux PCMCIA Team
-L: linux-pcmcia@lists.infradead.org
-W: http://lists.infradead.org/mailman/listinfo/linux-pcmcia
+M: Dominik Brodowski <linux@dominikbrodowski.net>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brodo/pcmcia.git
-S: Maintained
+S: Odd Fixes
F: Documentation/pcmcia/
F: tools/pcmcia/
F: drivers/pcmcia/
@@ -12027,6 +12028,7 @@ F: drivers/pci/hotplug/s390_pci_hpc.c
S390 VFIO-CCW DRIVER
M: Cornelia Huck <cohuck@redhat.com>
M: Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+M: Halil Pasic <pasic@linux.vnet.ibm.com>
L: linux-s390@vger.kernel.org
L: kvm@vger.kernel.org
S: Supported
diff --git a/Makefile b/Makefile
index d192dd826cce..cd9145c0878d 100644
--- a/Makefile
+++ b/Makefile
@@ -729,7 +729,6 @@ endif
ifeq ($(cc-name),clang)
KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
@@ -747,9 +746,9 @@ else
# These warnings generated too much noise in a regular build.
# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
endif
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
else
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 3d22eb87f919..9003bd19cb70 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -131,7 +131,7 @@ static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
- return cpsr_mode > USR_MODE;;
+ return cpsr_mode > USR_MODE;
}
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index acbf9ec7b396..248b930563e5 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -48,6 +48,8 @@
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
+DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -306,4 +308,11 @@ static inline void kvm_fpsimd_flush_cpu_state(void) {}
static inline void kvm_arm_vhe_guest_enter(void) {}
static inline void kvm_arm_vhe_guest_exit(void) {}
+
+static inline bool kvm_arm_harden_branch_predictor(void)
+{
+ /* No way to detect it yet, pretend it is not there. */
+ return false;
+}
+
#endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index ab20ffa8b9e7..1ab8329e9ff7 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -21,7 +21,6 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/cp15.h>
-#include <asm/kvm_mmu.h>
#include <asm/vfp.h>
#define __hyp_text __section(.hyp.text) notrace
@@ -69,6 +68,8 @@
#define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
#define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
+#define BPIALLIS __ACCESS_CP15(c7, 0, c1, 6)
+#define ICIMVAU __ACCESS_CP15(c7, 0, c5, 1)
#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index a2d176a308bd..de1b919404e4 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -37,6 +37,8 @@
#include <linux/highmem.h>
#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/kvm_hyp.h>
#include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h>
@@ -83,6 +85,18 @@ static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
return pmd;
}
+static inline pte_t kvm_s2pte_mkexec(pte_t pte)
+{
+ pte_val(pte) &= ~L_PTE_XN;
+ return pte;
+}
+
+static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~PMD_SECT_XN;
+ return pmd;
+}
+
static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
@@ -93,6 +107,11 @@ static inline bool kvm_s2pte_readonly(pte_t *pte)
return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
}
+static inline bool kvm_s2pte_exec(pte_t *pte)
+{
+ return !(pte_val(*pte) & L_PTE_XN);
+}
+
static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
@@ -103,6 +122,11 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
}
+static inline bool kvm_s2pmd_exec(pmd_t *pmd)
+{
+ return !(pmd_val(*pmd) & PMD_SECT_XN);
+}
+
static inline bool kvm_page_empty(void *ptr)
{
struct page *ptr_page = virt_to_page(ptr);
@@ -126,10 +150,36 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
}
-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
- kvm_pfn_t pfn,
- unsigned long size)
+static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
+{
+ /*
+ * Clean the dcache to the Point of Coherency.
+ *
+ * We need to do this through a kernel mapping (using the
+ * user-space mapping has proved to be the wrong
+ * solution). For that, we need to kmap one page at a time,
+ * and iterate over the range.
+ */
+
+ VM_BUG_ON(size & ~PAGE_MASK);
+
+ while (size) {
+ void *va = kmap_atomic_pfn(pfn);
+
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ size -= PAGE_SIZE;
+ pfn++;
+
+ kunmap_atomic(va);
+ }
+}
+
+static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
+ unsigned long size)
{
+ u32 iclsz;
+
/*
* If we are going to insert an instruction page and the icache is
* either VIPT or PIPT, there is a potential problem where the host
@@ -141,23 +191,40 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
*
 * VIVT caches are tagged using both the ASID and the VMID and don't
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
- *
- * We need to do this through a kernel mapping (using the
- * user-space mapping has proved to be the wrong
- * solution). For that, we need to kmap one page at a time,
- * and iterate over the range.
*/
VM_BUG_ON(size & ~PAGE_MASK);
+ if (icache_is_vivt_asid_tagged())
+ return;
+
+ if (!icache_is_pipt()) {
+ /* any kind of VIPT cache */
+ __flush_icache_all();
+ return;
+ }
+
+ /*
+ * CTR IminLine contains Log2 of the number of words in the
+ * cache line, so we can get the number of words as
+ * 2 << (IminLine - 1). To get the number of bytes, we
+ * multiply by 4 (the number of bytes in a 32-bit word), and
+ * get 4 << (IminLine).
+ */
+ iclsz = 4 << (read_cpuid(CPUID_CACHETYPE) & 0xf);
+
while (size) {
void *va = kmap_atomic_pfn(pfn);
+ void *end = va + PAGE_SIZE;
+ void *addr = va;
- kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+ do {
+ write_sysreg(addr, ICIMVAU);
+ addr += iclsz;
+ } while (addr < end);
- if (icache_is_pipt())
- __cpuc_coherent_user_range((unsigned long)va,
- (unsigned long)va + PAGE_SIZE);
+ dsb(ishst);
+ isb();
size -= PAGE_SIZE;
pfn++;
@@ -165,9 +232,11 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
kunmap_atomic(va);
}
- if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
- /* any kind of VIPT cache */
- __flush_icache_all();
+ /* Check if we need to invalidate the BTB */
+ if ((read_cpuid_ext(CPUID_EXT_MMFR1) >> 28) != 4) {
+ write_sysreg(0, BPIALLIS);
+ dsb(ishst);
+ isb();
}
}
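
The IminLine arithmetic described in the comment above boils down to a
one-line helper; a C sketch of the same computation (illustrative only,
taking the raw CTR value that the hunk reads via read_cpuid(CPUID_CACHETYPE)):

    /* CTR[3:0] (IminLine) is log2 of the I-cache line size in words;
     * 4 << IminLine converts that to bytes (4 bytes per 32-bit word). */
    static inline u32 icache_line_bytes(u32 ctr)
    {
            return 4U << (ctr & 0xf);
    }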
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h
deleted file mode 100644
index 6bda945d31fa..000000000000
--- a/arch/arm/include/asm/kvm_psci.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2012 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM_KVM_PSCI_H__
-#define __ARM_KVM_PSCI_H__
-
-#define KVM_ARM_PSCI_0_1 1
-#define KVM_ARM_PSCI_0_2 2
-
-int kvm_psci_version(struct kvm_vcpu *vcpu);
-int kvm_psci_call(struct kvm_vcpu *vcpu);
-
-#endif /* __ARM_KVM_PSCI_H__ */
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 150ece66ddf3..a757401129f9 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -102,8 +102,8 @@ extern pgprot_t pgprot_s2_device;
#define PAGE_HYP_EXEC _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
#define PAGE_HYP_RO _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
-#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)
+#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY | L_PTE_XN)
+#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY | L_PTE_XN)
#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index cf8bf6bf87c4..910bd8dabb3c 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -21,7 +21,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
-#include <asm/kvm_psci.h>
+#include <kvm/arm_psci.h>
#include <trace/events/kvm.h>
#include "trace.h"
@@ -36,9 +36,9 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_vcpu_hvc_get_imm(vcpu));
vcpu->stat.hvc_exit_stat++;
- ret = kvm_psci_call(vcpu);
+ ret = kvm_hvc_call_handler(vcpu);
if (ret < 0) {
- kvm_inject_undefined(vcpu);
+ vcpu_set_reg(vcpu, 0, ~0UL);
return 1;
}
@@ -47,7 +47,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- kvm_inject_undefined(vcpu);
+ /*
+ * "If an SMC instruction executed at Non-secure EL1 is
+ * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
+ * Trap exception, not a Secure Monitor Call exception [...]"
+ *
+ * We need to advance the PC after the trap, as it would
+ * otherwise return to the same address...
+ */
+ vcpu_set_reg(vcpu, 0, ~0UL);
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 330c9ce34ba5..ae45ae96aac2 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -18,6 +18,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
__asm__(".arch_extension virt");
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
index 6d810af2d9fd..c0edd450e104 100644
--- a/arch/arm/kvm/hyp/tlb.c
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -19,6 +19,7 @@
*/
#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
/**
* Flush per-VMID TLBs
diff --git a/arch/arm/mach-vt8500/Kconfig b/arch/arm/mach-vt8500/Kconfig
index 1156a585dafc..8841199058ea 100644
--- a/arch/arm/mach-vt8500/Kconfig
+++ b/arch/arm/mach-vt8500/Kconfig
@@ -13,7 +13,6 @@ config ARCH_WM8505
depends on ARCH_MULTI_V5
select ARCH_VT8500
select CPU_ARM926T
- help
config ARCH_WM8750
bool "WonderMedia WM8750"
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 3873dd7b5a32..3c78835bba94 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -116,6 +116,24 @@
.endm
/*
+ * Value prediction barrier
+ */
+ .macro csdb
+ hint #20
+ .endm
+
+/*
+ * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
+ * of bounds.
+ */
+ .macro mask_nospec64, idx, limit, tmp
+ sub \tmp, \idx, \limit
+ bic \tmp, \tmp, \idx
+ and \idx, \idx, \tmp, asr #63
+ csdb
+ .endm
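
For readers who prefer C to assembly, a functional model of what
mask_nospec64 computes (a sketch; like the kernel, it relies on an
arithmetic right shift of negative signed values):

    /* Return idx when 0 <= idx < limit, 0 otherwise, without branching. */
    static inline u64 mask_nospec64_model(u64 idx, u64 limit)
    {
            u64 mask = (idx - limit) & ~idx; /* bit 63 set iff idx in bounds */

            return idx & (u64)((s64)mask >> 63); /* smear bit 63 across the word */
    }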
+
+/*
* NOP sequence
*/
.macro nops, num
@@ -418,6 +436,27 @@ alternative_endif
.endm
/*
+ * Macro to perform instruction cache maintenance for the interval
+ * [start, end)
+ *
+ * start, end: virtual addresses describing the region
+ * label: A label to branch to on user fault.
+ * Corrupts: tmp1, tmp2
+ */
+ .macro invalidate_icache_by_line start, end, tmp1, tmp2, label
+ icache_line_size \tmp1, \tmp2
+ sub \tmp2, \tmp1, #1
+ bic \tmp2, \start, \tmp2
+9997:
+USER(\label, ic ivau, \tmp2) // invalidate I line PoU
+ add \tmp2, \tmp2, \tmp1
+ cmp \tmp2, \end
+ b.lo 9997b
+ dsb ish
+ isb
+ .endm
+
+/*
* reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
*/
.macro reset_pmuserenr_el0, tmpreg
@@ -514,7 +553,7 @@ alternative_endif
* phys: physical address, preserved
* ttbr: returns the TTBR value
*/
- .macro phys_to_ttbr, phys, ttbr
+ .macro phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
orr \ttbr, \phys, \phys, lsr #46
and \ttbr, \ttbr, #TTBR_BADDR_MASK_52
@@ -523,6 +562,29 @@ alternative_endif
#endif
.endm
+ .macro phys_to_pte, pte, phys
+#ifdef CONFIG_ARM64_PA_BITS_52
+ /*
+ * We assume \phys is 64K aligned and this is guaranteed by only
+ * supporting this configuration with 64K pages.
+ */
+ orr \pte, \phys, \phys, lsr #36
+ and \pte, \pte, #PTE_ADDR_MASK
+#else
+ mov \pte, \phys
+#endif
+ .endm
+
+ .macro pte_to_phys, phys, pte
+#ifdef CONFIG_ARM64_PA_BITS_52
+ ubfiz \phys, \pte, #(48 - 16 - 12), #16
+ bfxil \phys, \pte, #16, #32
+ lsl \phys, \phys, #16
+#else
+ and \phys, \pte, #PTE_ADDR_MASK
+#endif
+ .endm
+
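
In C terms, the 52-bit packing done by phys_to_pte and undone by
pte_to_phys is roughly the following (a sketch using GENMASK() for
readability; PA bits [51:48] are folded into PTE bits [15:12], which are
free because the address is 64K aligned):

    /* Fold PA[51:48] into PTE[15:12]; assumes a 64K-aligned address. */
    static inline u64 phys_to_pte_model(u64 phys)
    {
            return (phys | (phys >> 36)) & (GENMASK(47, 16) | GENMASK(15, 12));
    }

    /* Recover the 52-bit PA from the packed PTE. */
    static inline u64 pte_to_phys_model(u64 pte)
    {
            return (pte & GENMASK(47, 16)) | ((pte & GENMASK(15, 12)) << 36);
    }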
/**
* Errata workaround prior to disable MMU. Insert an ISB immediately prior
* to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 77651c49ef44..f11518af96a9 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -32,6 +32,7 @@
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
#define psb_csync() asm volatile("hint #17" : : : "memory")
+#define csdb() asm volatile("hint #20" : : : "memory")
#define mb() dsb(sy)
#define rmb() dsb(ld)
@@ -40,6 +41,27 @@
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
+/*
+ * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
+ * and 0 otherwise.
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long idx,
+ unsigned long sz)
+{
+ unsigned long mask;
+
+ asm volatile(
+ " cmp %1, %2\n"
+ " sbc %0, xzr, xzr\n"
+ : "=r" (mask)
+ : "r" (idx), "Ir" (sz)
+ : "cc");
+
+ csdb();
+ return mask;
+}
+
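
This mask is not used directly by callers; it is consumed by
array_index_nospec() from <linux/nospec.h>. A typical call site, with
hypothetical table and bound names, looks like this sketch:

    #include <linux/errno.h>
    #include <linux/nospec.h>

    extern long (*const handlers[])(void);  /* hypothetical dispatch table */
    extern const unsigned long nr_handlers;

    static long dispatch(unsigned long nr)
    {
            if (nr >= nr_handlers)
                    return -ENOSYS;

            /* The bounds check above may be mispredicted; clamp nr to 0
             * under speculation before using it as an array index. */
            nr = array_index_nospec(nr, nr_handlers);
            return handlers[nr]();
    }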
#define __smp_mb() dmb(ish)
#define __smp_rmb() dmb(ishld)
#define __smp_wmb() dmb(ishst)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 955130762a3c..bef9f418f089 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -52,6 +52,12 @@
* - start - virtual start address
* - end - virtual end address
*
+ * invalidate_icache_range(start, end)
+ *
+ * Invalidate the I-cache in the region described by start, end.
+ * - start - virtual start address
+ * - end - virtual end address
+ *
* __flush_cache_user_range(start, end)
*
* Ensure coherency between the I-cache and the D-cache in the
@@ -66,6 +72,7 @@
* - size - region size
*/
extern void flush_icache_range(unsigned long start, unsigned long end);
+extern int invalidate_icache_range(unsigned long start, unsigned long end);
extern void __flush_dcache_area(void *addr, size_t len);
extern void __inval_dcache_area(void *addr, size_t len);
extern void __clean_dcache_area_poc(void *addr, size_t len);
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 5bb2fd4674e7..07fe2479d310 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -48,9 +48,10 @@ do { \
} while (0)
static inline int
-arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
{
int oldval = 0, ret, tmp;
+ u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
pagefault_disable();
@@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
}
static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
u32 oldval, u32 newval)
{
int ret = 0;
u32 val, tmp;
+ u32 __user *uaddr;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32)))
return -EFAULT;
+ uaddr = __uaccess_mask_ptr(_uaddr);
uaccess_enable();
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
" prfm pstl1strm, %2\n"
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 82386e860dd2..a780f6714b44 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -123,16 +123,8 @@
/*
* Initial memory map attributes.
*/
-#define _SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
-#define _SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define SWAPPER_PTE_FLAGS (_SWAPPER_PTE_FLAGS | PTE_NG)
-#define SWAPPER_PMD_FLAGS (_SWAPPER_PMD_FLAGS | PMD_SECT_NG)
-#else
-#define SWAPPER_PTE_FLAGS _SWAPPER_PTE_FLAGS
-#define SWAPPER_PMD_FLAGS _SWAPPER_PMD_FLAGS
-#endif
+#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
#if ARM64_SWAPPER_USES_SECTION_MAPS
#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 4485ae8e98de..596f8e414a4c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -48,6 +48,8 @@
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
+DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext);
@@ -415,4 +417,10 @@ static inline void kvm_arm_vhe_guest_exit(void)
{
local_daif_restore(DAIF_PROCCTX_NOIRQ);
}
+
+static inline bool kvm_arm_harden_branch_predictor(void)
+{
+ return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+}
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 08d3bb66c8b7..f26f9cd70c72 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -20,7 +20,6 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
-#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>
#define __hyp_text __section(.hyp.text) notrace
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 72e279dbae5f..9679067a1574 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -173,6 +173,18 @@ static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
return pmd;
}
+static inline pte_t kvm_s2pte_mkexec(pte_t pte)
+{
+ pte_val(pte) &= ~PTE_S2_XN;
+ return pte;
+}
+
+static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
+{
+ pmd_val(pmd) &= ~PMD_S2_XN;
+ return pmd;
+}
+
static inline void kvm_set_s2pte_readonly(pte_t *pte)
{
pteval_t old_pteval, pteval;
@@ -191,6 +203,11 @@ static inline bool kvm_s2pte_readonly(pte_t *pte)
return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
}
+static inline bool kvm_s2pte_exec(pte_t *pte)
+{
+ return !(pte_val(*pte) & PTE_S2_XN);
+}
+
static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
{
kvm_set_s2pte_readonly((pte_t *)pmd);
@@ -201,6 +218,11 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
return kvm_s2pte_readonly((pte_t *)pmd);
}
+static inline bool kvm_s2pmd_exec(pmd_t *pmd)
+{
+ return !(pmd_val(*pmd) & PMD_S2_XN);
+}
+
static inline bool kvm_page_empty(void *ptr)
{
struct page *ptr_page = virt_to_page(ptr);
@@ -230,21 +252,25 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
- kvm_pfn_t pfn,
- unsigned long size)
+static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
void *va = page_address(pfn_to_page(pfn));
kvm_flush_dcache_to_poc(va, size);
+}
+static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
+ unsigned long size)
+{
if (icache_is_aliasing()) {
/* any kind of VIPT cache */
__flush_icache_all();
} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
- flush_icache_range((unsigned long)va,
- (unsigned long)va + size);
+ void *va = page_address(pfn_to_page(pfn));
+
+ invalidate_icache_range((unsigned long)va,
+ (unsigned long)va + size);
}
}
diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h
deleted file mode 100644
index bc39e557c56c..000000000000
--- a/arch/arm64/include/asm/kvm_psci.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2012,2013 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM64_KVM_PSCI_H__
-#define __ARM64_KVM_PSCI_H__
-
-#define KVM_ARM_PSCI_0_1 1
-#define KVM_ARM_PSCI_0_2 2
-
-int kvm_psci_version(struct kvm_vcpu *vcpu);
-int kvm_psci_call(struct kvm_vcpu *vcpu);
-
-#endif /* __ARM64_KVM_PSCI_H__ */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index f42836da8723..cdfe3e657a9e 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -187,9 +187,11 @@
*/
#define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */
#define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define PTE_S2_XN (_AT(pteval_t, 2) << 53) /* XN[1:0] */
#define PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[2:1] */
#define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
+#define PMD_S2_XN (_AT(pmdval_t, 2) << 53) /* XN[1:0] */
/*
* Memory Attribute override for Stage-2 (MemAttr[3:0])
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 22a926825e3f..108ecad7acc5 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -37,13 +37,11 @@
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define PROT_DEFAULT (_PROT_DEFAULT | PTE_NG)
-#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_SECT_NG)
-#else
-#define PROT_DEFAULT _PROT_DEFAULT
-#define PROT_SECT_DEFAULT _PROT_SECT_DEFAULT
-#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
+#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
+
+#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
+#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -55,22 +53,22 @@
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
-#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
-#define _HYP_PAGE_DEFAULT (_PAGE_DEFAULT & ~PTE_NG)
+#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
+#define _HYP_PAGE_DEFAULT _PAGE_DEFAULT
-#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
+#define PAGE_KERNEL __pgprot(PROT_NORMAL)
+#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
+#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
+#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
+#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
#define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
#define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
#define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
-#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
+#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY | PTE_S2_XN)
+#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN)
#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 6db43ebd648d..fce604e3e599 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -21,6 +21,9 @@
#define TASK_SIZE_64 (UL(1) << VA_BITS)
+#define KERNEL_DS UL(-1)
+#define USER_DS (TASK_SIZE_64 - 1)
+
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index fdb827c7832f..ebdae15d665d 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -87,8 +87,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
" cbnz %w1, 1f\n"
" add %w1, %w0, %3\n"
" casa %w0, %w1, %2\n"
- " and %w1, %w1, #0xffff\n"
- " eor %w1, %w1, %w0, lsr #16\n"
+ " sub %w1, %w1, %3\n"
+ " eor %w1, %w1, %w0\n"
"1:")
: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
: "I" (1 << TICKET_SHIFT)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 59fda5292936..543e11f0f657 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -35,16 +35,20 @@
#include <asm/compiler.h>
#include <asm/extable.h>
-#define KERNEL_DS (-1UL)
#define get_ds() (KERNEL_DS)
-
-#define USER_DS TASK_SIZE_64
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
+ /*
+ * Prevent a mispredicted conditional call to set_fs from forwarding
+ * the wrong address limit to access_ok under speculation.
+ */
+ dsb(nsh);
+ isb();
+
/* On user-mode return, check fs is correct */
set_thread_flag(TIF_FSCHECK);
@@ -66,22 +70,32 @@ static inline void set_fs(mm_segment_t fs)
* Returns 1 if the range is valid, 0 otherwise.
*
* This is equivalent to the following test:
- * (u65)addr + (u65)size <= current->addr_limit
- *
- * This needs 65-bit arithmetic.
+ * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
*/
-#define __range_ok(addr, size) \
-({ \
- unsigned long __addr = (unsigned long)(addr); \
- unsigned long flag, roksum; \
- __chk_user_ptr(addr); \
- asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
- : "=&r" (flag), "=&r" (roksum) \
- : "1" (__addr), "Ir" (size), \
- "r" (current_thread_info()->addr_limit) \
- : "cc"); \
- flag; \
-})
+static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+{
+ unsigned long limit = current_thread_info()->addr_limit;
+
+ __chk_user_ptr(addr);
+ asm volatile(
+ // A + B <= C + 1 for all A,B,C, in four easy steps:
+ // 1: X = A + B; X' = X % 2^64
+ " adds %0, %0, %2\n"
+ // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
+ " csel %1, xzr, %1, hi\n"
+ // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
+ // to compensate for the carry flag being set in step 4. For
+ // X > 2^64, X' merely has to remain nonzero, which it does.
+ " csinv %0, %0, xzr, cc\n"
+ // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
+ // comes from the carry in being clear. Otherwise, we are
+ // testing X' - C == 0, subject to the previous adjustments.
+ " sbcs xzr, %0, %1\n"
+ " cset %0, ls\n"
+ : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+
+ return addr;
+}
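
Functionally, the four steps compute the widened comparison from the
comment; the same check as a C model with a 128-bit intermediate (a
sketch, assuming the compiler provides __uint128_t):

    /* True iff addr + size <= limit + 1, evaluated without 64-bit wrap. */
    static inline bool range_ok_model(unsigned long addr, unsigned long size,
                                      unsigned long limit)
    {
            return (__uint128_t)addr + size <= (__uint128_t)limit + 1;
    }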
/*
* When dealing with data aborts, watchpoints, or instruction traps we may end
@@ -90,7 +104,7 @@ static inline void set_fs(mm_segment_t fs)
*/
#define untagged_addr(addr) sign_extend64(addr, 55)
-#define access_ok(type, addr, size) __range_ok(addr, size)
+#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
#define user_addr_max get_fs
#define _ASM_EXTABLE(from, to) \
@@ -221,6 +235,26 @@ static inline void uaccess_enable_not_uao(void)
}
/*
+ * Sanitise a uaccess pointer such that it becomes NULL if above the
+ * current addr_limit.
+ */
+#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
+static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
+{
+ void __user *safe_ptr;
+
+ asm volatile(
+ " bics xzr, %1, %2\n"
+ " csel %0, %1, xzr, eq\n"
+ : "=&r" (safe_ptr)
+ : "r" (ptr), "r" (current_thread_info()->addr_limit)
+ : "cc");
+
+ csdb();
+ return safe_ptr;
+}
+
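
The same sanitisation as a functional C model (a sketch only; unlike the
inline asm plus csdb() above, plain C leaves the compiler free to
reintroduce a predictable branch):

    /* NULL out any pointer that has bits above the addr_limit mask. */
    static inline void __user *mask_ptr_model(void __user *ptr,
                                              unsigned long limit)
    {
            return ((unsigned long)ptr & ~limit) ? NULL : ptr;
    }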
+/*
* The "__xxx" versions of the user access functions do not verify the address
* space - it must have been done previously with a separate "access_ok()"
* call.
@@ -272,28 +306,33 @@ do { \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
-#define __get_user(x, ptr) \
+#define __get_user_check(x, ptr, err) \
({ \
- int __gu_err = 0; \
- __get_user_err((x), (ptr), __gu_err); \
- __gu_err; \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \
+ __p = uaccess_mask_ptr(__p); \
+ __get_user_err((x), __p, (err)); \
+ } else { \
+ (x) = 0; (err) = -EFAULT; \
+ } \
})
#define __get_user_error(x, ptr, err) \
({ \
- __get_user_err((x), (ptr), (err)); \
+ __get_user_check((x), (ptr), (err)); \
(void)0; \
})
-#define get_user(x, ptr) \
+#define __get_user(x, ptr) \
({ \
- __typeof__(*(ptr)) __user *__p = (ptr); \
- might_fault(); \
- access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
- __get_user((x), __p) : \
- ((x) = 0, -EFAULT); \
+ int __gu_err = 0; \
+ __get_user_check((x), (ptr), __gu_err); \
+ __gu_err; \
})
+#define get_user __get_user
+
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \
asm volatile( \
"1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \
@@ -336,43 +375,63 @@ do { \
uaccess_disable_not_uao(); \
} while (0)
-#define __put_user(x, ptr) \
+#define __put_user_check(x, ptr, err) \
({ \
- int __pu_err = 0; \
- __put_user_err((x), (ptr), __pu_err); \
- __pu_err; \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \
+ __p = uaccess_mask_ptr(__p); \
+ __put_user_err((x), __p, (err)); \
+ } else { \
+ (err) = -EFAULT; \
+ } \
})
#define __put_user_error(x, ptr, err) \
({ \
- __put_user_err((x), (ptr), (err)); \
+ __put_user_check((x), (ptr), (err)); \
(void)0; \
})
-#define put_user(x, ptr) \
+#define __put_user(x, ptr) \
({ \
- __typeof__(*(ptr)) __user *__p = (ptr); \
- might_fault(); \
- access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
- __put_user((x), __p) : \
- -EFAULT; \
+ int __pu_err = 0; \
+ __put_user_check((x), (ptr), __pu_err); \
+ __pu_err; \
})
+#define put_user __put_user
+
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
-#define raw_copy_from_user __arch_copy_from_user
+#define raw_copy_from_user(to, from, n) \
+({ \
+ __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \
+})
+
extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
-#define raw_copy_to_user __arch_copy_to_user
-extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+#define raw_copy_to_user(to, from, n) \
+({ \
+ __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \
+})
+
+extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
+#define raw_copy_in_user(to, from, n) \
+({ \
+ __arch_copy_in_user(__uaccess_mask_ptr(to), \
+ __uaccess_mask_ptr(from), (n)); \
+})
+
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
-static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
+extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
+static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
if (access_ok(VERIFY_WRITE, to, n))
- n = __clear_user(to, n);
+ n = __arch_clear_user(__uaccess_mask_ptr(to), n);
return n;
}
+#define clear_user __clear_user
extern long strncpy_from_user(char *dest, const char __user *src, long count);
@@ -386,7 +445,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __
static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
kasan_check_write(dst, size);
- return __copy_user_flushcache(dst, src, size);
+ return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 252396a96c78..7b09487ff8fb 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -230,10 +230,10 @@ void __init acpi_boot_table_init(void)
done:
if (acpi_disabled) {
- if (earlycon_init_is_deferred)
+ if (earlycon_acpi_spcr_enable)
early_init_dt_scan_chosen_stdout();
} else {
- parse_spcr(earlycon_init_is_deferred);
+ acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
if (IS_ENABLED(CONFIG_ACPI_BGRT))
acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);
}
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 67368c7329c0..66be504edb6c 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page);
/* user mem (segment) */
EXPORT_SYMBOL(__arch_copy_from_user);
EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(raw_copy_in_user);
+EXPORT_SYMBOL(__arch_clear_user);
+EXPORT_SYMBOL(__arch_copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
index 76225c2611ea..e5de33513b5d 100644
--- a/arch/arm64/kernel/bpi.S
+++ b/arch/arm64/kernel/bpi.S
@@ -17,6 +17,7 @@
*/
#include <linux/linkage.h>
+#include <linux/arm-smccc.h>
.macro ventry target
.rept 31
@@ -53,30 +54,6 @@ ENTRY(__bp_harden_hyp_vecs_start)
vectors __kvm_hyp_vector
.endr
ENTRY(__bp_harden_hyp_vecs_end)
-ENTRY(__psci_hyp_bp_inval_start)
- sub sp, sp, #(8 * 18)
- stp x16, x17, [sp, #(16 * 0)]
- stp x14, x15, [sp, #(16 * 1)]
- stp x12, x13, [sp, #(16 * 2)]
- stp x10, x11, [sp, #(16 * 3)]
- stp x8, x9, [sp, #(16 * 4)]
- stp x6, x7, [sp, #(16 * 5)]
- stp x4, x5, [sp, #(16 * 6)]
- stp x2, x3, [sp, #(16 * 7)]
- stp x0, x1, [sp, #(16 * 8)]
- mov x0, #0x84000000
- smc #0
- ldp x16, x17, [sp, #(16 * 0)]
- ldp x14, x15, [sp, #(16 * 1)]
- ldp x12, x13, [sp, #(16 * 2)]
- ldp x10, x11, [sp, #(16 * 3)]
- ldp x8, x9, [sp, #(16 * 4)]
- ldp x6, x7, [sp, #(16 * 5)]
- ldp x4, x5, [sp, #(16 * 6)]
- ldp x2, x3, [sp, #(16 * 7)]
- ldp x0, x1, [sp, #(16 * 8)]
- add sp, sp, #(8 * 18)
-ENTRY(__psci_hyp_bp_inval_end)
ENTRY(__qcom_hyp_sanitize_link_stack_start)
stp x29, x30, [sp, #-16]!
@@ -85,3 +62,22 @@ ENTRY(__qcom_hyp_sanitize_link_stack_start)
.endr
ldp x29, x30, [sp], #16
ENTRY(__qcom_hyp_sanitize_link_stack_end)
+
+.macro smccc_workaround_1 inst
+ sub sp, sp, #(8 * 4)
+ stp x2, x3, [sp, #(8 * 0)]
+ stp x0, x1, [sp, #(8 * 2)]
+ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+ \inst #0
+ ldp x2, x3, [sp, #(8 * 0)]
+ ldp x0, x1, [sp, #(8 * 2)]
+ add sp, sp, #(8 * 4)
+.endm
+
+ENTRY(__smccc_workaround_1_smc_start)
+ smccc_workaround_1 smc
+ENTRY(__smccc_workaround_1_smc_end)
+
+ENTRY(__smccc_workaround_1_hvc_start)
+ smccc_workaround_1 hvc
+ENTRY(__smccc_workaround_1_hvc_end)
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 2a752cb2a0f3..8021b46c9743 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -16,7 +16,7 @@
#include <asm/virt.h>
.text
-.pushsection .idmap.text, "ax"
+.pushsection .idmap.text, "awx"
/*
* __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index ed6881882231..07823595b7f0 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -67,9 +67,12 @@ static int cpu_enable_trap_ctr_access(void *__unused)
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
#ifdef CONFIG_KVM
-extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
extern char __qcom_hyp_sanitize_link_stack_start[];
extern char __qcom_hyp_sanitize_link_stack_end[];
+extern char __smccc_workaround_1_smc_start[];
+extern char __smccc_workaround_1_smc_end[];
+extern char __smccc_workaround_1_hvc_start[];
+extern char __smccc_workaround_1_hvc_end[];
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
const char *hyp_vecs_end)
@@ -112,10 +115,12 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
spin_unlock(&bp_lock);
}
#else
-#define __psci_hyp_bp_inval_start NULL
-#define __psci_hyp_bp_inval_end NULL
#define __qcom_hyp_sanitize_link_stack_start NULL
#define __qcom_hyp_sanitize_link_stack_end NULL
+#define __smccc_workaround_1_smc_start NULL
+#define __smccc_workaround_1_smc_end NULL
+#define __smccc_workaround_1_hvc_start NULL
+#define __smccc_workaround_1_hvc_end NULL
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
@@ -142,17 +147,59 @@ static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}
+#include <uapi/linux/psci.h>
+#include <linux/arm-smccc.h>
#include <linux/psci.h>
-static int enable_psci_bp_hardening(void *data)
+static void call_smc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static void call_hvc_arch_workaround_1(void)
+{
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+}
+
+static int enable_smccc_arch_workaround_1(void *data)
{
const struct arm64_cpu_capabilities *entry = data;
+ bp_hardening_cb_t cb;
+ void *smccc_start, *smccc_end;
+ struct arm_smccc_res res;
+
+ if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+ return 0;
+
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+ return 0;
+
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if (res.a0)
+ return 0;
+ cb = call_hvc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_hvc_start;
+ smccc_end = __smccc_workaround_1_hvc_end;
+ break;
+
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if (res.a0)
+ return 0;
+ cb = call_smc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_smc_start;
+ smccc_end = __smccc_workaround_1_smc_end;
+ break;
+
+ default:
+ return 0;
+ }
- if (psci_ops.get_version)
- install_bp_hardening_cb(entry,
- (bp_hardening_cb_t)psci_ops.get_version,
- __psci_hyp_bp_inval_start,
- __psci_hyp_bp_inval_end);
+ install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
return 0;
}
@@ -333,22 +380,22 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
- .enable = enable_psci_bp_hardening,
+ .enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
- .enable = enable_psci_bp_hardening,
+ .enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
- .enable = enable_psci_bp_hardening,
+ .enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
- .enable = enable_psci_bp_hardening,
+ .enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
@@ -362,12 +409,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
- .enable = enable_psci_bp_hardening,
+ .enable = enable_smccc_arch_workaround_1,
},
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
- .enable = enable_psci_bp_hardening,
+ .enable = enable_smccc_arch_workaround_1,
},
#endif
{
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0fb6a3151443..29b1f873e337 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -856,12 +856,23 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
int __unused)
{
+ char const *str = "command line option";
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
- /* Forced on command line? */
+ /*
+ * For reasons that aren't entirely clear, enabling KPTI on Cavium
+ * ThunderX leads to apparent I-cache corruption of kernel text, which
+ * ends as well as you might imagine. Don't even try.
+ */
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+ str = "ARM64_WORKAROUND_CAVIUM_27456";
+ __kpti_forced = -1;
+ }
+
+ /* Forced? */
if (__kpti_forced) {
- pr_info_once("kernel page table isolation forced %s by command line option\n",
- __kpti_forced > 0 ? "ON" : "OFF");
+ pr_info_once("kernel page table isolation forced %s by %s\n",
+ __kpti_forced > 0 ? "ON" : "OFF", str);
return __kpti_forced > 0;
}
@@ -881,6 +892,30 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
ID_AA64PFR0_CSV3_SHIFT);
}
+static int kpti_install_ng_mappings(void *__unused)
+{
+ typedef void (kpti_remap_fn)(int, int, phys_addr_t);
+ extern kpti_remap_fn idmap_kpti_install_ng_mappings;
+ kpti_remap_fn *remap_fn;
+
+ static bool kpti_applied = false;
+ int cpu = smp_processor_id();
+
+ if (kpti_applied)
+ return 0;
+
+ remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+
+ cpu_install_idmap();
+ remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
+ cpu_uninstall_idmap();
+
+ if (!cpu)
+ kpti_applied = true;
+
+ return 0;
+}
+
static int __init parse_kpti(char *str)
{
bool enabled;
@@ -1004,6 +1039,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
.def_scope = SCOPE_SYSTEM,
.matches = unmap_kernel_at_el0,
+ .enable = kpti_install_ng_mappings,
},
#endif
{
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index cccd2788e631..ec2ee720e33e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -167,10 +167,10 @@ alternative_else_nop_endif
.else
add x21, sp, #S_FRAME_SIZE
get_thread_info tsk
- /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+ /* Save the task's original addr_limit and set USER_DS */
ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
- mov x20, #TASK_SIZE_64
+ mov x20, #USER_DS
str x20, [tsk, #TSK_TI_ADDR_LIMIT]
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
.endif /* \el == 0 */
@@ -382,6 +382,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
* x7 is reserved for the system call number in 32-bit mode.
*/
wsc_nr .req w25 // number of system calls
+xsc_nr .req x25 // number of system calls (zero-extended)
wscno .req w26 // syscall number
xscno .req x26 // syscall number (zero-extended)
stbl .req x27 // syscall table pointer
@@ -770,7 +771,10 @@ el0_sp_pc:
* Stack or PC alignment exception handling
*/
mrs x26, far_el1
- enable_daif
+ enable_da_f
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl trace_hardirqs_off
+#endif
ct_user_exit
mov x0, x26
mov x1, x25
@@ -828,6 +832,11 @@ el0_irq_naked:
#endif
ct_user_exit
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+ tbz x22, #55, 1f
+ bl do_el0_irq_bp_hardening
+1:
+#endif
irq_handler
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -939,6 +948,7 @@ el0_svc_naked: // compat entry point
b.ne __sys_trace
cmp wscno, wsc_nr // check upper syscall limit
b.hs ni_sys
+ mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number
ldr x16, [stbl, xscno, lsl #3] // address in the syscall table
blr x16 // call sys_* routine
b ret_fast_syscall
@@ -1017,16 +1027,9 @@ alternative_else_nop_endif
orr \tmp, \tmp, #USER_ASID_FLAG
msr ttbr1_el1, \tmp
/*
- * We avoid running the post_ttbr_update_workaround here because the
- * user and kernel ASIDs don't have conflicting mappings, so any
- * "blessing" as described in:
- *
- * http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
- *
- * will not hurt correctness. Whilst this may partially defeat the
- * point of using split ASIDs in the first place, it avoids
- * the hit of invalidating the entire I-cache on every return to
- * userspace.
+ * We avoid running the post_ttbr_update_workaround here because
+ * it's only needed by Cavium ThunderX, which requires KPTI to be
+ * disabled.
*/
.endm
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index ba3ab04788dc..2b6b8b24e5ab 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -148,26 +148,6 @@ preserve_boot_args:
ENDPROC(preserve_boot_args)
/*
- * Macro to arrange a physical address in a page table entry, taking care of
- * 52-bit addresses.
- *
- * Preserves: phys
- * Returns: pte
- */
- .macro phys_to_pte, phys, pte
-#ifdef CONFIG_ARM64_PA_BITS_52
- /*
- * We assume \phys is 64K aligned and this is guaranteed by only
- * supporting this configuration with 64K pages.
- */
- orr \pte, \phys, \phys, lsr #36
- and \pte, \pte, #PTE_ADDR_MASK
-#else
- mov \pte, \phys
-#endif
- .endm
-
-/*
* Macro to create a table entry to the next page.
*
* tbl: page table address
@@ -181,7 +161,7 @@ ENDPROC(preserve_boot_args)
*/
.macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
add \tmp1, \tbl, #PAGE_SIZE
- phys_to_pte \tmp1, \tmp2
+ phys_to_pte \tmp2, \tmp1
orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
lsr \tmp1, \virt, #\shift
sub \ptrs, \ptrs, #1
@@ -207,7 +187,7 @@ ENDPROC(preserve_boot_args)
* Returns: rtbl
*/
.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
-.Lpe\@: phys_to_pte \rtbl, \tmp1
+.Lpe\@: phys_to_pte \tmp1, \rtbl
orr \tmp1, \tmp1, \flags // tmp1 = table entry
str \tmp1, [\tbl, \index, lsl #3]
add \rtbl, \rtbl, \inc // rtbl = pa next level
@@ -475,7 +455,7 @@ ENDPROC(__primary_switched)
* end early head section, begin head code that is also used for
* hotplug and needs to have the same protections as the text region
*/
- .section ".idmap.text","ax"
+ .section ".idmap.text","awx"
ENTRY(kimage_vaddr)
.quad _text - TEXT_OFFSET
@@ -776,8 +756,8 @@ ENTRY(__enable_mmu)
update_early_cpu_boot_status 0, x1, x2
adrp x1, idmap_pg_dir
adrp x2, swapper_pg_dir
- phys_to_ttbr x1, x3
- phys_to_ttbr x2, x4
+ phys_to_ttbr x3, x1
+ phys_to_ttbr x4, x2
msr ttbr0_el1, x3 // load TTBR0
msr ttbr1_el1, x4 // load TTBR1
isb
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index 84f5d52fddda..dd14ab8c9f72 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -34,12 +34,12 @@
* each stage of the walk.
*/
.macro break_before_make_ttbr_switch zero_page, page_table, tmp
- phys_to_ttbr \zero_page, \tmp
+ phys_to_ttbr \tmp, \zero_page
msr ttbr1_el1, \tmp
isb
tlbi vmalle1
dsb nsh
- phys_to_ttbr \page_table, \tmp
+ phys_to_ttbr \tmp, \page_table
msr ttbr1_el1, \tmp
isb
.endm
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 10dd16d7902d..bebec8ef9372 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -96,7 +96,7 @@ ENTRY(__cpu_suspend_enter)
ret
ENDPROC(__cpu_suspend_enter)
- .pushsection ".idmap.text", "ax"
+ .pushsection ".idmap.text", "awx"
ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
bl __cpu_setup
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 5c7f657dd207..d7e3299a7734 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -361,10 +361,16 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
+ int ret = 0;
+
+ vcpu_load(vcpu);
+
trace_kvm_set_guest_debug(vcpu, dbg->control);
- if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
- return -EINVAL;
+ if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
+ ret = -EINVAL;
+ goto out;
+ }
if (dbg->control & KVM_GUESTDBG_ENABLE) {
vcpu->guest_debug = dbg->control;
@@ -378,7 +384,10 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
/* If not enabled clear all flags */
vcpu->guest_debug = 0;
}
- return 0;
+
+out:
+ vcpu_put(vcpu);
+ return ret;
}
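This is the shape every converted ioctl in this series takes: with the generic code no longer pinning the vcpu around arch callbacks, each handler brackets its own work with vcpu_load()/vcpu_put(). A sketch of the pattern (the handler and body names here are placeholders, not real kernel symbols):

int kvm_arch_vcpu_ioctl_example(struct kvm_vcpu *vcpu, void *arg)
{
	int ret;

	vcpu_load(vcpu);		/* migrate vcpu state onto this CPU */
	ret = do_example(vcpu, arg);	/* hypothetical handler body */
	vcpu_put(vcpu);

	return ret;
}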
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 520b0dad3c62..e5e741bfffe1 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -22,13 +22,14 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <kvm/arm_psci.h>
+
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
-#include <asm/kvm_psci.h>
#include <asm/debug-monitors.h>
#include <asm/traps.h>
@@ -51,7 +52,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_vcpu_hvc_get_imm(vcpu));
vcpu->stat.hvc_exit_stat++;
- ret = kvm_psci_call(vcpu);
+ ret = kvm_hvc_call_handler(vcpu);
if (ret < 0) {
vcpu_set_reg(vcpu, 0, ~0UL);
return 1;
@@ -62,7 +63,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
+ /*
+ * "If an SMC instruction executed at Non-secure EL1 is
+ * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
+ * Trap exception, not a Secure Monitor Call exception [...]"
+ *
+ * We need to advance the PC after the trap, as it would
+ * otherwise return to the same address...
+ */
vcpu_set_reg(vcpu, 0, ~0UL);
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 1;
}
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index e086c6eff8c6..5aa9ccf6db99 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -63,7 +63,7 @@ __do_hyp_init:
cmp x0, #HVC_STUB_HCALL_NR
b.lo __kvm_handle_stub_hvc
- phys_to_ttbr x0, x4
+ phys_to_ttbr x4, x0
msr ttbr0_el2, x4
mrs x4, tcr_el1
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index f4363d40e2cd..dabb5cc7b087 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -21,6 +21,7 @@
#include <asm/debug-monitors.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
#define read_debug(r,n) read_sysreg(r##n##_el1)
#define write_debug(v,r,n) write_sysreg(v, r##n##_el1)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index e4f37b9dd47c..f36464bd57c5 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm-smccc.h>
#include <linux/linkage.h>
#include <asm/alternative.h>
@@ -64,10 +65,11 @@ alternative_endif
lsr x0, x1, #ESR_ELx_EC_SHIFT
cmp x0, #ESR_ELx_EC_HVC64
+ ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
b.ne el1_trap
- mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest
- cbnz x1, el1_trap // called HVC
+ mrs x1, vttbr_el2 // If vttbr is valid, the guest
+ cbnz x1, el1_hvc_guest // called HVC
/* Here, we're pretty sure the host called HVC. */
ldp x0, x1, [sp], #16
@@ -100,6 +102,20 @@ alternative_endif
eret
+el1_hvc_guest:
+ /*
+ * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
+ * The workaround has already been applied on the host,
+ * so let's quickly get back to the guest. We don't bother
+ * restoring x1, as it can be clobbered anyway.
+ */
+ ldr x1, [sp] // Guest's x0
+ eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
+ cbnz w1, el1_trap
+ mov x0, x1
+ add sp, sp, #16
+ eret
+
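From the guest's point of view this fast path services a plain SMCCC 1.1 call. A hedged sketch of the guest-side invocation using the arm_smccc_1_1_hvc() helper from <linux/arm-smccc.h> (real callers probe the SMCCC version and feature first):

#include <linux/arm-smccc.h>

static void harden_branch_predictor(void)
{
	struct arm_smccc_res res;

	/* Trapped by the hyp vector above; returns via the early
	 * eret without ever reaching the full exit handler. */
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, &res);
}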
el1_trap:
/*
* x0: ESR_EC
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 036e1f3d77a6..116252a8d3a5 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -19,9 +19,12 @@
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>
+#include <kvm/arm_psci.h>
+
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
@@ -348,18 +351,6 @@ again:
if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
goto again;
- if (exit_code == ARM_EXCEPTION_TRAP &&
- (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
- kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) &&
- vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) {
- u64 val = PSCI_RET_NOT_SUPPORTED;
- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
- val = 2;
-
- vcpu_set_reg(vcpu, 0, val);
- goto again;
- }
-
if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
exit_code == ARM_EXCEPTION_TRAP) {
bool valid;
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 73464a96c365..131c7772703c 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -16,6 +16,7 @@
*/
#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 3d69a8d41fa5..21ba0b29621b 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -21,7 +21,7 @@
.text
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: int __arch_clear_user(void *addr, size_t sz)
* Purpose : clear some user memory
* Params : addr - user memory address to clear
* : sz - number of bytes to clear
@@ -29,7 +29,7 @@
*
* Alignment fixed up by hardware.
*/
-ENTRY(__clear_user)
+ENTRY(__arch_clear_user)
uaccess_enable_not_uao x2, x3, x4
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
@@ -52,7 +52,7 @@ uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
5: mov x0, #0
uaccess_disable_not_uao x2, x3
ret
-ENDPROC(__clear_user)
+ENDPROC(__arch_clear_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index fbb090f431a5..54b75deb1d16 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -64,14 +64,15 @@
.endm
end .req x5
-ENTRY(raw_copy_in_user)
+
+ENTRY(__arch_copy_in_user)
uaccess_enable_not_uao x3, x4, x5
add end, x0, x2
#include "copy_template.S"
uaccess_disable_not_uao x3, x4
mov x0, #0
ret
-ENDPROC(raw_copy_in_user)
+ENDPROC(__arch_copy_in_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 91464e7f77cc..758bde7e2fa6 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -60,16 +60,7 @@ user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
b.lo 1b
dsb ish
- icache_line_size x2, x3
- sub x3, x2, #1
- bic x4, x0, x3
-1:
-USER(9f, ic ivau, x4 ) // invalidate I line PoU
- add x4, x4, x2
- cmp x4, x1
- b.lo 1b
- dsb ish
- isb
+ invalidate_icache_by_line x0, x1, x2, x3, 9f
mov x0, #0
1:
uaccess_ttbr0_disable x1, x2
@@ -81,6 +72,27 @@ ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
/*
+ * invalidate_icache_range(start,end)
+ *
+ * Ensure that the I cache is invalid within the specified region.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(invalidate_icache_range)
+ uaccess_ttbr0_enable x2, x3, x4
+
+ invalidate_icache_by_line x0, x1, x2, x3, 2f
+ mov x0, xzr
+1:
+ uaccess_ttbr0_disable x1, x2
+ ret
+2:
+ mov x0, #-EFAULT
+ b 1b
+ENDPROC(invalidate_icache_range)
+
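invalidate_icache_range() gives callers an I-side-only primitive. A hedged usage sketch for making freshly written instructions visible; pairing it with a D-side clean is an assumption about the caller, not something the helper does itself:

/* After writing executable code to [va, va + size): */
__flush_dcache_area(va, size);			/* clean D-side to PoU */
invalidate_icache_range((unsigned long)va,
			(unsigned long)va + size);	/* drop stale I-lines */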
+/*
* __flush_dcache_area(kaddr, size)
*
* Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index ce441d29e7f6..f76bb2c3c943 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -240,7 +240,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs,
if (fsc_type == ESR_ELx_FSC_PERM)
return true;
- if (addr < USER_DS && system_uses_ttbr0_pan())
+ if (addr < TASK_SIZE && system_uses_ttbr0_pan())
return fsc_type == ESR_ELx_FSC_FAULT &&
(regs->pstate & PSR_PAN_BIT);
@@ -414,7 +414,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (addr < USER_DS && is_permission_fault(esr, regs, addr)) {
+ if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
@@ -707,6 +707,12 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
arm64_notify_die("", regs, &info, esr);
}
+asmlinkage void __exception do_el0_irq_bp_hardening(void)
+{
+ /* PC has already been checked in entry.S */
+ arm64_apply_bp_hardening();
+}
+
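entry.S gates this call on "tbz x22, #55, 1f": x22 holds the saved ELR, and bit 55 of an arm64 VA distinguishes the TTBR0 (user) half from the TTBR1 (kernel) half, so the hardening runs only when an exception from EL0 reports a kernel-half PC. The sibling handlers below express the same test in C:

/* C equivalent of the entry.S bit-55 test (sketch): */
if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
	arm64_apply_bp_hardening();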
asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
@@ -731,6 +737,12 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
struct siginfo info;
struct task_struct *tsk = current;
+ if (user_mode(regs)) {
+ if (instruction_pointer(regs) > TASK_SIZE)
+ arm64_apply_bp_hardening();
+ local_irq_enable();
+ }
+
if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
tsk->comm, task_pid_nr(tsk),
@@ -790,6 +802,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
if (interrupts_enabled(regs))
trace_hardirqs_off();
+ if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
+ arm64_apply_bp_hardening();
+
if (!inf->fn(addr, esr, regs)) {
rv = 1;
} else {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 4e369dfb83b1..4694cda823c9 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -118,6 +118,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
if ((old | new) & PTE_CONT)
return false;
+ /* Transitioning from Global to Non-Global is safe */
+ if (((old ^ new) == PTE_NG) && (new & PTE_NG))
+ return true;
+
return ((old ^ new) & ~mask) == 0;
}
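The new special case exists for the KPTI remap later in this diff, which flips exactly one bit on live kernel entries. Spelled out as a sketch:

u64 old = pte;			/* global kernel mapping, nG clear */
u64 new = pte | PTE_NG;		/* same attributes, now non-global */
bool safe = ((old ^ new) == PTE_NG) && (new & PTE_NG);	/* true: G -> nG */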
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 9f177aac6390..71baed7e592a 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -90,7 +90,7 @@ ENDPROC(cpu_do_suspend)
*
* x0: Address of context pointer
*/
- .pushsection ".idmap.text", "ax"
+ .pushsection ".idmap.text", "awx"
ENTRY(cpu_do_resume)
ldp x2, x3, [x0]
ldp x4, x5, [x0, #16]
@@ -153,7 +153,7 @@ ENDPROC(cpu_do_resume)
ENTRY(cpu_do_switch_mm)
mrs x2, ttbr1_el1
mmid x1, x1 // get mm->context.id
- phys_to_ttbr x0, x3
+ phys_to_ttbr x3, x0
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
bfi x3, x1, #48, #16 // set the ASID field in TTBR0
#endif
@@ -165,7 +165,18 @@ ENTRY(cpu_do_switch_mm)
b post_ttbr_update_workaround // Back to C code...
ENDPROC(cpu_do_switch_mm)
- .pushsection ".idmap.text", "ax"
+ .pushsection ".idmap.text", "awx"
+
+.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
+ adrp \tmp1, empty_zero_page
+ phys_to_ttbr \tmp2, \tmp1
+ msr ttbr1_el1, \tmp2
+ isb
+ tlbi vmalle1
+ dsb nsh
+ isb
+.endm
+
/*
* void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
*
@@ -175,24 +186,201 @@ ENDPROC(cpu_do_switch_mm)
ENTRY(idmap_cpu_replace_ttbr1)
save_and_disable_daif flags=x2
- adrp x1, empty_zero_page
- phys_to_ttbr x1, x3
+ __idmap_cpu_set_reserved_ttbr1 x1, x3
+
+ phys_to_ttbr x3, x0
msr ttbr1_el1, x3
isb
- tlbi vmalle1
- dsb nsh
+ restore_daif x2
+
+ ret
+ENDPROC(idmap_cpu_replace_ttbr1)
+ .popsection
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ .pushsection ".idmap.text", "awx"
+
+ .macro __idmap_kpti_get_pgtable_ent, type
+ dc cvac, cur_\()\type\()p // Ensure any existing dirty
+ dmb sy // lines are written back before
+ ldr \type, [cur_\()\type\()p] // loading the entry
+ tbz \type, #0, next_\()\type // Skip invalid entries
+ .endm
+
+ .macro __idmap_kpti_put_pgtable_ent_ng, type
+ orr \type, \type, #PTE_NG // Same bit for blocks and pages
+ str \type, [cur_\()\type\()p] // Update the entry and ensure it
+ dc civac, cur_\()\type\()p // is visible to all CPUs.
+ .endm
+
+/*
+ * void idmap_kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
+ *
+ * Called exactly once from stop_machine context by each CPU found during boot.
+ */
+__idmap_kpti_flag:
+ .long 1
+ENTRY(idmap_kpti_install_ng_mappings)
+ cpu .req w0
+ num_cpus .req w1
+ swapper_pa .req x2
+ swapper_ttb .req x3
+ flag_ptr .req x4
+ cur_pgdp .req x5
+ end_pgdp .req x6
+ pgd .req x7
+ cur_pudp .req x8
+ end_pudp .req x9
+ pud .req x10
+ cur_pmdp .req x11
+ end_pmdp .req x12
+ pmd .req x13
+ cur_ptep .req x14
+ end_ptep .req x15
+ pte .req x16
+
+ mrs swapper_ttb, ttbr1_el1
+ adr flag_ptr, __idmap_kpti_flag
+
+ cbnz cpu, __idmap_kpti_secondary
+
+ /* We're the boot CPU. Wait for the others to catch up */
+ sevl
+1: wfe
+ ldaxr w18, [flag_ptr]
+ eor w18, w18, num_cpus
+ cbnz w18, 1b
+
+ /* We need to walk swapper, so turn off the MMU. */
+ pre_disable_mmu_workaround
+ mrs x18, sctlr_el1
+ bic x18, x18, #SCTLR_ELx_M
+ msr sctlr_el1, x18
isb
- phys_to_ttbr x0, x3
- msr ttbr1_el1, x3
+ /* Everybody is enjoying the idmap, so we can rewrite swapper. */
+ /* PGD */
+ mov cur_pgdp, swapper_pa
+ add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
+do_pgd: __idmap_kpti_get_pgtable_ent pgd
+ tbnz pgd, #1, walk_puds
+ __idmap_kpti_put_pgtable_ent_ng pgd
+next_pgd:
+ add cur_pgdp, cur_pgdp, #8
+ cmp cur_pgdp, end_pgdp
+ b.ne do_pgd
+
+ /* Publish the updated tables and nuke all the TLBs */
+ dsb sy
+ tlbi vmalle1is
+ dsb ish
isb
- restore_daif x2
+ /* We're done: fire up the MMU again */
+ mrs x18, sctlr_el1
+ orr x18, x18, #SCTLR_ELx_M
+ msr sctlr_el1, x18
+ isb
+ /* Set the flag to zero to indicate that we're all done */
+ str wzr, [flag_ptr]
ret
-ENDPROC(idmap_cpu_replace_ttbr1)
+
+ /* PUD */
+walk_puds:
+ .if CONFIG_PGTABLE_LEVELS > 3
+ pte_to_phys cur_pudp, pgd
+ add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
+do_pud: __idmap_kpti_get_pgtable_ent pud
+ tbnz pud, #1, walk_pmds
+ __idmap_kpti_put_pgtable_ent_ng pud
+next_pud:
+ add cur_pudp, cur_pudp, 8
+ cmp cur_pudp, end_pudp
+ b.ne do_pud
+ b next_pgd
+ .else /* CONFIG_PGTABLE_LEVELS <= 3 */
+ mov pud, pgd
+ b walk_pmds
+next_pud:
+ b next_pgd
+ .endif
+
+ /* PMD */
+walk_pmds:
+ .if CONFIG_PGTABLE_LEVELS > 2
+ pte_to_phys cur_pmdp, pud
+ add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
+do_pmd: __idmap_kpti_get_pgtable_ent pmd
+ tbnz pmd, #1, walk_ptes
+ __idmap_kpti_put_pgtable_ent_ng pmd
+next_pmd:
+ add cur_pmdp, cur_pmdp, #8
+ cmp cur_pmdp, end_pmdp
+ b.ne do_pmd
+ b next_pud
+ .else /* CONFIG_PGTABLE_LEVELS <= 2 */
+ mov pmd, pud
+ b walk_ptes
+next_pmd:
+ b next_pud
+ .endif
+
+ /* PTE */
+walk_ptes:
+ pte_to_phys cur_ptep, pmd
+ add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
+do_pte: __idmap_kpti_get_pgtable_ent pte
+ __idmap_kpti_put_pgtable_ent_ng pte
+next_pte:
+ add cur_ptep, cur_ptep, #8
+ cmp cur_ptep, end_ptep
+ b.ne do_pte
+ b next_pmd
+
+ /* Secondary CPUs end up here */
+__idmap_kpti_secondary:
+ /* Uninstall swapper before surgery begins */
+ __idmap_cpu_set_reserved_ttbr1 x18, x17
+
+ /* Increment the flag to let the boot CPU know we're ready */
+1: ldxr w18, [flag_ptr]
+ add w18, w18, #1
+ stxr w17, w18, [flag_ptr]
+ cbnz w17, 1b
+
+ /* Wait for the boot CPU to finish messing around with swapper */
+ sevl
+1: wfe
+ ldxr w18, [flag_ptr]
+ cbnz w18, 1b
+
+ /* All done, act like nothing happened */
+ msr ttbr1_el1, swapper_ttb
+ isb
+ ret
+
+ .unreq cpu
+ .unreq num_cpus
+ .unreq swapper_pa
+ .unreq swapper_ttb
+ .unreq flag_ptr
+ .unreq cur_pgdp
+ .unreq end_pgdp
+ .unreq pgd
+ .unreq cur_pudp
+ .unreq end_pudp
+ .unreq pud
+ .unreq cur_pmdp
+ .unreq end_pmdp
+ .unreq pmd
+ .unreq cur_ptep
+ .unreq end_ptep
+ .unreq pte
+ENDPROC(idmap_kpti_install_ng_mappings)
.popsection
+#endif
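The table walk above is, in C terms, a depth-first descent that ORs PTE_NG into every valid entry. A rough model follows, assuming uniformly sized levels and MMU-on addressing for readability (the real walk runs with the MMU off via the idmap and cleans/invalidates lines by hand):

static void mark_ng(u64 *table, int level)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		u64 ent = table[i];

		if (!(ent & 1))			/* invalid: skip */
			continue;
		if (level < 3 && (ent & 2)) {	/* table descriptor: descend */
			mark_ng(__va(ent & PTE_ADDR_MASK), level + 1);
			continue;
		}
		table[i] = ent | PTE_NG;	/* block or page: set nG */
	}
}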
/*
* __cpu_setup
@@ -200,7 +388,7 @@ ENDPROC(idmap_cpu_replace_ttbr1)
* Initialise the processor for turning the MMU on. Return in x0 the
* value of the SCTLR_EL1 register.
*/
- .pushsection ".idmap.text", "ax"
+ .pushsection ".idmap.text", "awx"
ENTRY(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB
dsb nsh
diff --git a/arch/cris/kernel/Makefile b/arch/cris/kernel/Makefile
index e69de29bb2d1..f6bfee6c8c1b 100644
--- a/arch/cris/kernel/Makefile
+++ b/arch/cris/kernel/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the CRIS port.
+#
+
+CPPFLAGS_vmlinux.lds := -DDRAM_VIRTUAL_BASE=0x$(CONFIG_ETRAX_DRAM_VIRTUAL_BASE)
+extra-y := vmlinux.lds
+
+obj-y := process.o traps.o irq.o ptrace.o setup.o time.o sys_cris.o
+obj-y += stacktrace.o
+
+obj-$(CONFIG_MODULES) += crisksyms.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_SYSTEM_PROFILER) += profile.o
+
+clean:
+
diff --git a/arch/cris/kernel/setup.c b/arch/cris/kernel/setup.c
index 524d47501a23..1b61a7207afb 100644
--- a/arch/cris/kernel/setup.c
+++ b/arch/cris/kernel/setup.c
@@ -24,6 +24,7 @@
#include <linux/of_fdt.h>
#include <asm/setup.h>
#include <arch/system.h>
+#include <asm/sections.h>
/*
* Setup options
@@ -31,7 +32,6 @@
struct screen_info screen_info;
extern int root_mountflags;
-extern char _etext, _edata, _end;
char __initdata cris_command_line[COMMAND_LINE_SIZE] = { 0, };
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 449397c60b56..8128c3b68d6b 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2333,7 +2333,6 @@ config MIPS_VPE_LOADER_TOM
config MIPS_VPE_APSP_API
bool "Enable support for AP/SP API (RTLX)"
depends on MIPS_VPE_LOADER
- help
config MIPS_VPE_APSP_API_CMP
bool
diff --git a/arch/mips/bcm63xx/boards/Kconfig b/arch/mips/bcm63xx/boards/Kconfig
index 6ff0a7481081..f60d96610ace 100644
--- a/arch/mips/bcm63xx/boards/Kconfig
+++ b/arch/mips/bcm63xx/boards/Kconfig
@@ -7,6 +7,5 @@ choice
config BOARD_BCM963XX
bool "Generic Broadcom 963xx boards"
select SSB
- help
endchoice
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index b17447ce8873..76b93a9c8c9b 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -22,6 +22,7 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ select HAVE_KVM_VCPU_ASYNC_IOCTL
select KVM_MMIO
select MMU_NOTIFIER
select SRCU
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 75fdeaa8c62f..2549fdd27ee1 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -446,6 +446,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int r = -EINTR;
+ vcpu_load(vcpu);
+
kvm_sigset_activate(vcpu);
if (vcpu->mmio_needed) {
@@ -480,6 +482,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
out:
kvm_sigset_deactivate(vcpu);
+ vcpu_put(vcpu);
return r;
}
@@ -900,6 +903,26 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return r;
}
+long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+
+ if (ioctl == KVM_INTERRUPT) {
+ struct kvm_mips_interrupt irq;
+
+ if (copy_from_user(&irq, argp, sizeof(irq)))
+ return -EFAULT;
+ kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+ irq.irq);
+
+ return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+ }
+
+ return -ENOIOCTLCMD;
+}
+
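Selecting HAVE_KVM_VCPU_ASYNC_IOCTL makes the generic dispatcher try this hook before taking vcpu->mutex, which is what lets KVM_INTERRUPT be delivered from a thread other than the vcpu thread. Roughly, per the generic side of this series in virt/kvm/kvm_main.c (sketch):

r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
if (r != -ENOIOCTLCMD)
	return r;	/* handled locklessly, no vcpu_load() */

if (mutex_lock_killable(&vcpu->mutex))
	return -EINTR;
/* ... synchronous ioctls, bracketed by vcpu_load()/vcpu_put() ... */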
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
unsigned long arg)
{
@@ -907,56 +930,54 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
void __user *argp = (void __user *)arg;
long r;
+ vcpu_load(vcpu);
+
switch (ioctl) {
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
struct kvm_one_reg reg;
+ r = -EFAULT;
if (copy_from_user(&reg, argp, sizeof(reg)))
- return -EFAULT;
+ break;
if (ioctl == KVM_SET_ONE_REG)
- return kvm_mips_set_reg(vcpu, &reg);
+ r = kvm_mips_set_reg(vcpu, &reg);
else
- return kvm_mips_get_reg(vcpu, &reg);
+ r = kvm_mips_get_reg(vcpu, &reg);
+ break;
}
case KVM_GET_REG_LIST: {
struct kvm_reg_list __user *user_list = argp;
struct kvm_reg_list reg_list;
unsigned n;
+ r = -EFAULT;
if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
- return -EFAULT;
+ break;
n = reg_list.n;
reg_list.n = kvm_mips_num_regs(vcpu);
if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
- return -EFAULT;
+ break;
+ r = -E2BIG;
if (n < reg_list.n)
- return -E2BIG;
- return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
- }
- case KVM_INTERRUPT:
- {
- struct kvm_mips_interrupt irq;
-
- if (copy_from_user(&irq, argp, sizeof(irq)))
- return -EFAULT;
- kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
- irq.irq);
-
- r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
break;
- }
+ r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
+ break;
+ }
case KVM_ENABLE_CAP: {
struct kvm_enable_cap cap;
+ r = -EFAULT;
if (copy_from_user(&cap, argp, sizeof(cap)))
- return -EFAULT;
+ break;
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
break;
}
default:
r = -ENOIOCTLCMD;
}
+
+ vcpu_put(vcpu);
return r;
}
@@ -1145,6 +1166,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
+ vcpu_load(vcpu);
+
for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
vcpu->arch.gprs[i] = regs->gpr[i];
vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
@@ -1152,6 +1175,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.lo = regs->lo;
vcpu->arch.pc = regs->pc;
+ vcpu_put(vcpu);
return 0;
}
@@ -1159,6 +1183,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
+ vcpu_load(vcpu);
+
for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
regs->gpr[i] = vcpu->arch.gprs[i];
@@ -1166,6 +1192,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->lo = vcpu->arch.lo;
regs->pc = vcpu->arch.pc;
+ vcpu_put(vcpu);
return 0;
}
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
index 60fae03dac79..3d4ec88f1db1 100644
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -152,7 +152,6 @@ menu "Advanced setup"
config ADVANCED_OPTIONS
bool "Prompt for advanced kernel configuration options"
- help
comment "Default settings for advanced configuration options are used"
depends on !ADVANCED_OPTIONS
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 9a667007bff8..376ae803b69c 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -249,10 +249,8 @@ extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
-extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
- struct kvm_vcpu *vcpu);
-extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
- struct kvmppc_book3s_shadow_vcpu *svcpu);
+extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
extern int kvm_irq_bypass;
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 735cfa35298a..998f7b7aaa9e 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -122,13 +122,13 @@ static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
lphi = (l >> 16) & 0xf;
switch ((l >> 12) & 0xf) {
case 0:
- return !lphi ? 24 : -1; /* 16MB */
+ return !lphi ? 24 : 0; /* 16MB */
break;
case 1:
return 16; /* 64kB */
break;
case 3:
- return !lphi ? 34 : -1; /* 16GB */
+ return !lphi ? 34 : 0; /* 16GB */
break;
case 7:
return (16 << 8) + 12; /* 64kB in 4kB */
@@ -140,7 +140,7 @@ static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
return (24 << 8) + 12; /* 16MB in 4kB */
break;
}
- return -1;
+ return 0;
}
static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
@@ -159,7 +159,11 @@ static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l
static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
- return 1ul << kvmppc_hpte_actual_page_shift(v, r);
+ int shift = kvmppc_hpte_actual_page_shift(v, r);
+
+ if (shift)
+ return 1ul << shift;
+ return 0;
}
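Returning 0 instead of -1 gives callers a sentinel they can use safely: "1ul << -1" is undefined behaviour, whereas a zero page size can be tested directly. A caller-side sketch (the error value is illustrative):

unsigned long pgsize = kvmppc_actual_pgsz(v, r);

if (!pgsize)			/* reserved/invalid LP encoding */
	return H_PARAMETER;	/* illustrative error path */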
static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
@@ -232,7 +236,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
va_low ^= v >> (SID_SHIFT_1T - 16);
va_low &= 0x7ff;
- if (b_pgshift == 12) {
+ if (b_pgshift <= 12) {
if (a_pgshift > 12) {
sllp = (a_pgshift == 16) ? 5 : 4;
rb |= sllp << 5; /* AP field */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 3aa5b577cd60..1f53b562726f 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -690,6 +690,7 @@ struct kvm_vcpu_arch {
u8 mmio_vsx_offset;
u8 mmio_vsx_copy_type;
u8 mmio_vsx_tx_sx_enabled;
+ u8 mmio_vmx_copy_nums;
u8 osi_needed;
u8 osi_enabled;
u8 papr_enabled;
@@ -709,6 +710,7 @@ struct kvm_vcpu_arch {
u8 ceded;
u8 prodded;
u8 doorbell_request;
+ u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
u32 last_inst;
struct swait_queue_head *wqp;
@@ -738,8 +740,11 @@ struct kvm_vcpu_arch {
struct kvmppc_icp *icp; /* XICS presentation controller */
struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */
__be32 xive_cam_word; /* Cooked W2 in proper endian with valid bit */
- u32 xive_pushed; /* Is the VP pushed on the physical CPU ? */
+ u8 xive_pushed; /* Is the VP pushed on the physical CPU? */
+ u8 xive_esc_on; /* Is the escalation irq enabled? */
union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */
+ u64 xive_esc_raddr; /* Escalation interrupt ESB real addr */
+ u64 xive_esc_vaddr; /* Escalation interrupt ESB virt addr */
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -800,6 +805,7 @@ struct kvm_vcpu_arch {
#define KVM_MMIO_REG_QPR 0x0040
#define KVM_MMIO_REG_FQPR 0x0060
#define KVM_MMIO_REG_VSX 0x0080
+#define KVM_MMIO_REG_VMX 0x00c0
#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9db18287b5f4..7765a800ddae 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -81,6 +81,10 @@ extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend);
+extern int kvmppc_handle_load128_by2x64(struct kvm_run *run,
+ struct kvm_vcpu *vcpu, unsigned int rt, int is_default_endian);
+extern int kvmppc_handle_store128_by2x64(struct kvm_run *run,
+ struct kvm_vcpu *vcpu, unsigned int rs, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes,
int is_default_endian);
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 24c73f5575ee..94bd1bf2c873 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -1076,6 +1076,7 @@ enum {
/* Flags for OPAL_XIVE_GET/SET_VP_INFO */
enum {
OPAL_XIVE_VP_ENABLED = 0x00000001,
+ OPAL_XIVE_VP_SINGLE_ESCALATION = 0x00000002,
};
/* "Any chip" replacement for chip ID for allocation functions */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ab5c1588b487..f1083bcf449c 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -156,6 +156,12 @@
#define OP_31_XOP_LFDX 599
#define OP_31_XOP_LFDUX 631
+/* VMX Vector Load Instructions */
+#define OP_31_XOP_LVX 103
+
+/* VMX Vector Store Instructions */
+#define OP_31_XOP_STVX 231
+
#define OP_LWZ 32
#define OP_STFS 52
#define OP_STFSU 53
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 7624e22f5045..8d1a2792484f 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -111,9 +111,10 @@ extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
extern void xive_native_sync_source(u32 hw_irq);
extern bool is_xive_irq(struct irq_chip *chip);
-extern int xive_native_enable_vp(u32 vp_id);
+extern int xive_native_enable_vp(u32 vp_id, bool single_escalation);
extern int xive_native_disable_vp(u32 vp_id);
extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
+extern bool xive_native_has_single_escalation(void);
#else
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 637b7263cb86..833ed9a16adf 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -632,6 +632,8 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
#define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
+#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
*/
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 88b84ac76b53..ea5eb91b836e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -520,6 +520,7 @@ int main(void)
OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
OFFSET(VCPU_PRODDED, kvm_vcpu, arch.prodded);
+ OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending);
OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request);
OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr);
OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
@@ -739,6 +740,9 @@ int main(void)
DEFINE(VCPU_XIVE_CAM_WORD, offsetof(struct kvm_vcpu,
arch.xive_cam_word));
DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed));
+ DEFINE(VCPU_XIVE_ESC_ON, offsetof(struct kvm_vcpu, arch.xive_esc_on));
+ DEFINE(VCPU_XIVE_ESC_RADDR, offsetof(struct kvm_vcpu, arch.xive_esc_raddr));
+ DEFINE(VCPU_XIVE_ESC_VADDR, offsetof(struct kvm_vcpu, arch.xive_esc_vaddr));
#endif
#ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index b12b8eb39c29..68a0e9d5b440 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -22,6 +22,7 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_EVENTFD
+ select HAVE_KVM_VCPU_ASYNC_IOCTL
select SRCU
select KVM_VFIO
select IRQ_BYPASS_MANAGER
@@ -68,7 +69,7 @@ config KVM_BOOK3S_64
select KVM_BOOK3S_64_HANDLER
select KVM
select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
- select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_SERIES || PPC_POWERNV)
+ select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV)
---help---
Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors.
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 72d977e30952..234531d1bee1 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -484,19 +484,33 @@ void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
- return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
+ int ret;
+
+ vcpu_load(vcpu);
+ ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
+ vcpu_put(vcpu);
+
+ return ret;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
- return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
+ int ret;
+
+ vcpu_load(vcpu);
+ ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
+ vcpu_put(vcpu);
+
+ return ret;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
+ vcpu_load(vcpu);
+
regs->pc = kvmppc_get_pc(vcpu);
regs->cr = kvmppc_get_cr(vcpu);
regs->ctr = kvmppc_get_ctr(vcpu);
@@ -518,6 +532,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
+ vcpu_put(vcpu);
return 0;
}
@@ -525,6 +540,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
+ vcpu_load(vcpu);
+
kvmppc_set_pc(vcpu, regs->pc);
kvmppc_set_cr(vcpu, regs->cr);
kvmppc_set_ctr(vcpu, regs->ctr);
@@ -545,6 +562,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
+ vcpu_put(vcpu);
return 0;
}
@@ -737,7 +755,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
+ vcpu_load(vcpu);
vcpu->guest_debug = dbg->control;
+ vcpu_put(vcpu);
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index b73dbc9e797d..ef243fed2f2b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1269,6 +1269,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
/* Nothing to do */
goto out;
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ rpte = be64_to_cpu(hptep[1]);
+ vpte = hpte_new_to_old_v(vpte, rpte);
+ }
+
/* Unmap */
rev = &old->rev[idx];
guest_rpte = rev->guest_rpte;
@@ -1298,7 +1303,6 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
/* Reload PTE after unmap */
vpte = be64_to_cpu(hptep[0]);
-
BUG_ON(vpte & HPTE_V_VALID);
BUG_ON(!(vpte & HPTE_V_ABSENT));
@@ -1307,6 +1311,12 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
goto out;
rpte = be64_to_cpu(hptep[1]);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ vpte = hpte_new_to_old_v(vpte, rpte);
+ rpte = hpte_new_to_old_r(rpte);
+ }
+
pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
pteg = idx / HPTES_PER_GROUP;
@@ -1337,17 +1347,17 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
}
new_pteg = hash & new_hash_mask;
- if (vpte & HPTE_V_SECONDARY) {
- BUG_ON(~pteg != (hash & old_hash_mask));
- new_pteg = ~new_pteg;
- } else {
- BUG_ON(pteg != (hash & old_hash_mask));
- }
+ if (vpte & HPTE_V_SECONDARY)
+ new_pteg = ~hash & new_hash_mask;
new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP);
new_hptep = (__be64 *)(new->virt + (new_idx << 4));
replace_vpte = be64_to_cpu(new_hptep[0]);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ unsigned long replace_rpte = be64_to_cpu(new_hptep[1]);
+ replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte);
+ }
if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
BUG_ON(new->order >= old->order);
@@ -1363,6 +1373,11 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
/* Discard the previous HPTE */
}
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ rpte = hpte_old_to_new_r(vpte, rpte);
+ vpte = hpte_old_to_new_v(vpte);
+ }
+
new_hptep[1] = cpu_to_be64(rpte);
new->rev[new_idx].guest_rpte = guest_rpte;
/* No need for a barrier, since new HPT isn't active */
@@ -1380,12 +1395,6 @@ static int resize_hpt_rehash(struct kvm_resize_hpt *resize)
unsigned long i;
int rc;
- /*
- * resize_hpt_rehash_hpte() doesn't handle the new-format HPTEs
- * that POWER9 uses, and could well hit a BUG_ON on POWER9.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- return -EIO;
for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
rc = resize_hpt_rehash_hpte(resize, i);
if (rc != 0)
@@ -1416,6 +1425,9 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)
synchronize_srcu_expedited(&kvm->srcu);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ kvmppc_setup_partition_table(kvm);
+
resize_hpt_debug(resize, "resize_hpt_pivot() done\n");
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 58618f644c56..0c854816e653 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -573,7 +573,7 @@ long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
j = i + 1;
if (npages) {
set_dirty_bits(map, i, npages);
- i = j + npages;
+ j = i + npages;
}
}
return 0;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index e4f70c33fbc7..89707354c2ef 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -116,6 +116,9 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif
+/* If set, the threads on each CPU core have to be in the same MMU mode */
+static bool no_mixing_hpt_and_radix;
+
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
@@ -1003,8 +1006,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tvcpu;
- if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return EMULATE_FAIL;
if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
return RESUME_GUEST;
if (get_op(inst) != 31)
@@ -1054,6 +1055,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
+/* Called with vcpu->arch.vcore->lock held */
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
@@ -1174,7 +1176,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
swab32(vcpu->arch.emul_inst) :
vcpu->arch.emul_inst;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
+ /* Need vcore unlocked to call kvmppc_get_last_inst */
+ spin_unlock(&vcpu->arch.vcore->lock);
r = kvmppc_emulate_debug_inst(run, vcpu);
+ spin_lock(&vcpu->arch.vcore->lock);
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
@@ -1189,8 +1194,13 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
r = EMULATE_FAIL;
- if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG)
+ if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
+ cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* Need vcore unlocked to call kvmppc_get_last_inst */
+ spin_unlock(&vcpu->arch.vcore->lock);
r = kvmppc_emulate_doorbell_instr(vcpu);
+ spin_lock(&vcpu->arch.vcore->lock);
+ }
if (r == EMULATE_FAIL) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
@@ -1495,6 +1505,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_ARCH_COMPAT:
*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
break;
+ case KVM_REG_PPC_DEC_EXPIRY:
+ *val = get_reg_val(id, vcpu->arch.dec_expires +
+ vcpu->arch.vcore->tb_offset);
+ break;
default:
r = -EINVAL;
break;
@@ -1722,6 +1736,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_ARCH_COMPAT:
r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
break;
+ case KVM_REG_PPC_DEC_EXPIRY:
+ vcpu->arch.dec_expires = set_reg_val(id, *val) -
+ vcpu->arch.vcore->tb_offset;
+ break;
default:
r = -EINVAL;
break;
@@ -2376,8 +2394,8 @@ static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
static bool subcore_config_ok(int n_subcores, int n_threads)
{
/*
- * POWER9 "SMT4" cores are permanently in what is effectively a 4-way split-core
- * mode, with one thread per subcore.
+ * POWER9 "SMT4" cores are permanently in what is effectively a 4-way
+ * split-core mode, with one thread per subcore.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
return n_subcores <= 4 && n_threads == 1;
@@ -2413,8 +2431,8 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return false;
- /* POWER9 currently requires all threads to be in the same MMU mode */
- if (cpu_has_feature(CPU_FTR_ARCH_300) &&
+ /* Some POWER9 chips require all threads to be in the same MMU mode */
+ if (no_mixing_hpt_and_radix &&
kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm))
return false;
@@ -2677,9 +2695,11 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* threads are offline. Also check if the number of threads in this
* guest are greater than the current system threads per guest.
* On POWER9, we need to be not in independent-threads mode if
- * this is a HPT guest on a radix host.
+ * this is an HPT guest on a radix host machine where the
+ * CPU threads may not be in different MMU modes.
*/
- hpt_on_radix = radix_enabled() && !kvm_is_radix(vc->kvm);
+ hpt_on_radix = no_mixing_hpt_and_radix && radix_enabled() &&
+ !kvm_is_radix(vc->kvm);
if (((controlled_threads > 1) &&
((vc->num_threads > threads_per_subcore) || !on_primary_thread())) ||
(hpt_on_radix && vc->kvm->arch.threads_indep)) {
@@ -2829,7 +2849,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
*/
if (!thr0_done)
kvmppc_start_thread(NULL, pvc);
- thr += pvc->num_threads;
}
/*
@@ -2932,13 +2951,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
/* make sure updates to secondary vcpu structs are visible now */
smp_mb();
+ preempt_enable();
+
for (sub = 0; sub < core_info.n_subcores; ++sub) {
pvc = core_info.vc[sub];
post_guest_process(pvc, pvc == vc);
}
spin_lock(&vc->lock);
- preempt_enable();
out:
vc->vcore_state = VCORE_INACTIVE;
@@ -2985,7 +3005,7 @@ static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
{
if (!xive_enabled())
return false;
- return vcpu->arch.xive_saved_state.pipr <
+ return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr <
vcpu->arch.xive_saved_state.cppr;
}
#else
@@ -3174,17 +3194,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
* this thread straight away and have it join in.
*/
if (!signal_pending(current)) {
- if (vc->vcore_state == VCORE_PIGGYBACK) {
- if (spin_trylock(&vc->lock)) {
- if (vc->vcore_state == VCORE_RUNNING &&
- !VCORE_IS_EXITING(vc)) {
- kvmppc_create_dtl_entry(vcpu, vc);
- kvmppc_start_thread(vcpu, vc);
- trace_kvm_guest_enter(vcpu);
- }
- spin_unlock(&vc->lock);
- }
- } else if (vc->vcore_state == VCORE_RUNNING &&
+ if ((vc->vcore_state == VCORE_PIGGYBACK ||
+ vc->vcore_state == VCORE_RUNNING) &&
!VCORE_IS_EXITING(vc)) {
kvmppc_create_dtl_entry(vcpu, vc);
kvmppc_start_thread(vcpu, vc);
@@ -4446,6 +4457,19 @@ static int kvmppc_book3s_init_hv(void)
if (kvmppc_radix_possible())
r = kvmppc_radix_init();
+
+ /*
+ * POWER9 chips before version 2.02 can't have some threads in
+ * HPT mode and some in radix mode on the same core.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ unsigned int pvr = mfspr(SPRN_PVR);
+ if ((pvr >> 16) == PVR_POWER9 &&
+ (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
+ ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
+ no_mixing_hpt_and_radix = true;
+ }
+
return r;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 7886b313d135..f31f357b8c5a 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -413,10 +413,11 @@ FTR_SECTION_ELSE
/* On P9 we use the split_info for coordinating LPCR changes */
lwz r4, KVM_SPLIT_DO_SET(r6)
cmpwi r4, 0
- beq 63f
+ beq 1f
mr r3, r6
bl kvmhv_p9_set_lpcr
nop
+1:
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
/* Order load of vcpu after load of vcore */
@@ -617,13 +618,6 @@ kvmppc_hv_entry:
lbz r0, KVM_RADIX(r9)
cmpwi cr7, r0, 0
- /* Clear out SLB if hash */
- bne cr7, 2f
- li r6,0
- slbmte r6,r6
- slbia
- ptesync
-2:
/*
* POWER7/POWER8 host -> guest partition switch code.
* We don't have to lock against concurrent tlbies,
@@ -738,19 +732,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
10: cmpdi r4, 0
beq kvmppc_primary_no_guest
kvmppc_got_guest:
-
- /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
- lwz r5,VCPU_SLB_MAX(r4)
- cmpwi r5,0
- beq 9f
- mtctr r5
- addi r6,r4,VCPU_SLB
-1: ld r8,VCPU_SLB_E(r6)
- ld r9,VCPU_SLB_V(r6)
- slbmte r9,r8
- addi r6,r6,VCPU_SLB_SIZE
- bdnz 1b
-9:
/* Increment yield count if they have a VPA */
ld r3, VCPU_VPA(r4)
cmpdi r3, 0
@@ -957,7 +938,6 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
mftb r7
subf r3,r7,r8
mtspr SPRN_DEC,r3
- std r3,VCPU_DEC(r4)
ld r5, VCPU_SPRG0(r4)
ld r6, VCPU_SPRG1(r4)
@@ -1018,6 +998,29 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
cmpdi r3, 512 /* 1 microsecond */
blt hdec_soon
+ /* For hash guest, clear out and reload the SLB */
+ ld r6, VCPU_KVM(r4)
+ lbz r0, KVM_RADIX(r6)
+ cmpwi r0, 0
+ bne 9f
+ li r6, 0
+ slbmte r6, r6
+ slbia
+ ptesync
+
+ /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
+ lwz r5,VCPU_SLB_MAX(r4)
+ cmpwi r5,0
+ beq 9f
+ mtctr r5
+ addi r6,r4,VCPU_SLB
+1: ld r8,VCPU_SLB_E(r6)
+ ld r9,VCPU_SLB_V(r6)
+ slbmte r9,r8
+ addi r6,r6,VCPU_SLB_SIZE
+ bdnz 1b
+9:
+
#ifdef CONFIG_KVM_XICS
/* We are entering the guest on that thread, push VCPU to XIVE */
ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
@@ -1031,8 +1034,53 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
li r9, TM_QW1_OS + TM_WORD2
stwcix r11,r9,r10
li r9, 1
- stw r9, VCPU_XIVE_PUSHED(r4)
+ stb r9, VCPU_XIVE_PUSHED(r4)
eieio
+
+ /*
+ * We clear the irq_pending flag. There is a small chance of a
+ * race vs. the escalation interrupt happening on another
+ * processor setting it again, but the only consequence is to
+ * cause a spurious wakeup on the next H_CEDE, which is not an
+ * issue.
+ */
+ li r0,0
+ stb r0, VCPU_IRQ_PENDING(r4)
+
+ /*
+ * In single escalation mode, if the escalation interrupt is
+ * on, we mask it.
+ */
+ lbz r0, VCPU_XIVE_ESC_ON(r4)
+ cmpwi r0,0
+ beq 1f
+ ld r10, VCPU_XIVE_ESC_RADDR(r4)
+ li r9, XIVE_ESB_SET_PQ_01
+ ldcix r0, r10, r9
+ sync
+
+ /* We have a possible subtle race here: The escalation interrupt might
+ * have fired and be on its way to the host queue while we mask it,
+ * and if we unmask it early enough (re-cede right away), there is
+ * a theoretical possibility that it fires again, thus landing in the
+ * target queue more than once which is a big no-no.
+ *
+ * Fortunately, solving this is rather easy. If the above load setting
+ * PQ to 01 returns a previous value where P is set, then we know the
+ * escalation interrupt is somewhere on its way to the host. In that
+ * case we simply don't clear the xive_esc_on flag below. It will be
+ * eventually cleared by the handler for the escalation interrupt.
+ *
+ * Then, when doing a cede, we check that flag again before re-enabling
+ * the escalation interrupt, and if set, we abort the cede.
+ */
+ andi. r0, r0, XIVE_ESB_VAL_P
+ bne- 1f
+
+ /* Now P is 0, we can clear the flag */
+ li r0, 0
+ stb r0, VCPU_XIVE_ESC_ON(r4)
+1:
no_xive:
#endif /* CONFIG_KVM_XICS */
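A C rendering of the PQ dance above may help: a load from the ESB at offset XIVE_ESB_SET_PQ_01 atomically masks the interrupt and returns the prior PQ state, and if P was already set the escalation has fired and is in flight, so the flag must survive until the escalation handler clears it. Sketch, with xd being the escalation interrupt's xive_irq_data as obtained in book3s_xive.c below:

if (vcpu->arch.xive_esc_on) {
	u64 pq = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);	/* mask */

	/* P set: an escalation is already on its way to the host
	 * queue; leave xive_esc_on so cede won't re-enable it. */
	if (!(pq & XIVE_ESB_VAL_P))
		vcpu->arch.xive_esc_on = false;
}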
@@ -1193,7 +1241,7 @@ hdec_soon:
addi r3, r4, VCPU_TB_RMEXIT
bl kvmhv_accumulate_time
#endif
- b guest_exit_cont
+ b guest_bypass
/******************************************************************************
* *
@@ -1423,15 +1471,35 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
blt deliver_guest_interrupt
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
+ /* Save more register state */
+ mfdar r6
+ mfdsisr r7
+ std r6, VCPU_DAR(r9)
+ stw r7, VCPU_DSISR(r9)
+ /* don't overwrite fault_dar/fault_dsisr if HDSI */
+ cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
+ beq mc_cont
+ std r6, VCPU_FAULT_DAR(r9)
+ stw r7, VCPU_FAULT_DSISR(r9)
+
+ /* See if it is a machine check */
+ cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
+ beq machine_check_realmode
+mc_cont:
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r9, VCPU_TB_RMEXIT
+ mr r4, r9
+ bl kvmhv_accumulate_time
+#endif
#ifdef CONFIG_KVM_XICS
/* We are exiting, pull the VP from the XIVE */
- lwz r0, VCPU_XIVE_PUSHED(r9)
+ lbz r0, VCPU_XIVE_PUSHED(r9)
cmpwi cr0, r0, 0
beq 1f
li r7, TM_SPC_PULL_OS_CTX
li r6, TM_QW1_OS
mfmsr r0
- andi. r0, r0, MSR_IR /* in real mode? */
+ andi. r0, r0, MSR_DR /* in real mode? */
beq 2f
ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
cmpldi cr0, r10, 0
@@ -1454,33 +1522,42 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
/* Fixup some of the state for the next load */
li r10, 0
li r0, 0xff
- stw r10, VCPU_XIVE_PUSHED(r9)
+ stb r10, VCPU_XIVE_PUSHED(r9)
stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
eieio
1:
#endif /* CONFIG_KVM_XICS */
- /* Save more register state */
- mfdar r6
- mfdsisr r7
- std r6, VCPU_DAR(r9)
- stw r7, VCPU_DSISR(r9)
- /* don't overwrite fault_dar/fault_dsisr if HDSI */
- cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
- beq mc_cont
- std r6, VCPU_FAULT_DAR(r9)
- stw r7, VCPU_FAULT_DSISR(r9)
- /* See if it is a machine check */
- cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
- beq machine_check_realmode
-mc_cont:
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
- addi r3, r9, VCPU_TB_RMEXIT
- mr r4, r9
- bl kvmhv_accumulate_time
-#endif
+ /* For hash guest, read the guest SLB and save it away */
+ ld r5, VCPU_KVM(r9)
+ lbz r0, KVM_RADIX(r5)
+ li r5, 0
+ cmpwi r0, 0
+ bne 3f /* for radix, save 0 entries */
+ lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
+ mtctr r0
+ li r6,0
+ addi r7,r9,VCPU_SLB
+1: slbmfee r8,r6
+ andis. r0,r8,SLB_ESID_V@h
+ beq 2f
+ add r8,r8,r6 /* put index in */
+ slbmfev r3,r6
+ std r8,VCPU_SLB_E(r7)
+ std r3,VCPU_SLB_V(r7)
+ addi r7,r7,VCPU_SLB_SIZE
+ addi r5,r5,1
+2: addi r6,r6,1
+ bdnz 1b
+ /* Finally clear out the SLB */
+ li r0,0
+ slbmte r0,r0
+ slbia
+ ptesync
+3: stw r5,VCPU_SLB_MAX(r9)
+guest_bypass:
mr r3, r12
/* Increment exit count, poke other threads to exit */
bl kvmhv_commence_exit
@@ -1501,31 +1578,6 @@ mc_cont:
ori r6,r6,1
mtspr SPRN_CTRLT,r6
4:
- /* Check if we are running hash or radix and store it in cr2 */
- ld r5, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r5)
- cmpwi cr2,r0,0
-
- /* Read the guest SLB and save it away */
- li r5, 0
- bne cr2, 3f /* for radix, save 0 entries */
- lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
- mtctr r0
- li r6,0
- addi r7,r9,VCPU_SLB
-1: slbmfee r8,r6
- andis. r0,r8,SLB_ESID_V@h
- beq 2f
- add r8,r8,r6 /* put index in */
- slbmfev r3,r6
- std r8,VCPU_SLB_E(r7)
- std r3,VCPU_SLB_V(r7)
- addi r7,r7,VCPU_SLB_SIZE
- addi r5,r5,1
-2: addi r6,r6,1
- bdnz 1b
-3: stw r5,VCPU_SLB_MAX(r9)
-
/*
* Save the guest PURR/SPURR
*/
@@ -1803,7 +1855,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r5, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r5)
cmpwi cr2, r0, 0
- beq cr2, 3f
+ beq cr2, 4f
/* Radix: Handle the case where the guest used an illegal PID */
LOAD_REG_ADDR(r4, mmu_base_pid)
@@ -1839,15 +1891,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
PPC_INVALIDATE_ERAT
END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
- b 4f
+4:
#endif /* CONFIG_PPC_RADIX_MMU */
- /* Hash: clear out SLB */
-3: li r5,0
- slbmte r5,r5
- slbia
- ptesync
-4:
/*
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
@@ -2745,7 +2791,32 @@ kvm_cede_prodded:
/* we've ceded but we want to give control to the host */
kvm_cede_exit:
ld r9, HSTATE_KVM_VCPU(r13)
- b guest_exit_cont
+#ifdef CONFIG_KVM_XICS
+ /* Abort if we still have a pending escalation */
+ lbz r5, VCPU_XIVE_ESC_ON(r9)
+ cmpwi r5, 0
+ beq 1f
+ li r0, 0
+ stb r0, VCPU_CEDED(r9)
+1: /* Enable XIVE escalation */
+ li r5, XIVE_ESB_SET_PQ_00
+ mfmsr r0
+ andi. r0, r0, MSR_DR /* in real mode? */
+ beq 1f
+ ld r10, VCPU_XIVE_ESC_VADDR(r9)
+ cmpdi r10, 0
+ beq 3f
+ ldx r0, r10, r5
+ b 2f
+1: ld r10, VCPU_XIVE_ESC_RADDR(r9)
+ cmpdi r10, 0
+ beq 3f
+ ldcix r0, r10, r5
+2: sync
+ li r0, 1
+ stb r0, VCPU_XIVE_ESC_ON(r9)
+#endif /* CONFIG_KVM_XICS */
+3: b guest_exit_cont
/* Try to handle a machine check in real mode */
machine_check_realmode:
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 901e6fe00c39..c18e845019ec 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -96,7 +96,7 @@ kvm_start_entry:
kvm_start_lightweight:
/* Copy registers into shadow vcpu so we can access them in real mode */
- GET_SHADOW_VCPU(r3)
+ mr r3, r4
bl FUNC(kvmppc_copy_to_svcpu)
nop
REST_GPR(4, r1)
@@ -165,9 +165,7 @@ after_sprg3_load:
stw r12, VCPU_TRAP(r3)
/* Transfer reg values from shadow vcpu back to vcpu struct */
- /* On 64-bit, interrupts are still off at this point */
- GET_SHADOW_VCPU(r4)
bl FUNC(kvmppc_copy_from_svcpu)
nop
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 7deaeeb14b93..3ae752314b34 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -121,7 +121,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
#ifdef CONFIG_PPC_BOOK3S_64
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
if (svcpu->in_use) {
- kvmppc_copy_from_svcpu(vcpu, svcpu);
+ kvmppc_copy_from_svcpu(vcpu);
}
memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
@@ -143,9 +143,10 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
}
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
-void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
- struct kvm_vcpu *vcpu)
+void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+
svcpu->gpr[0] = vcpu->arch.gpr[0];
svcpu->gpr[1] = vcpu->arch.gpr[1];
svcpu->gpr[2] = vcpu->arch.gpr[2];
@@ -177,17 +178,14 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
if (cpu_has_feature(CPU_FTR_ARCH_207S))
vcpu->arch.entry_ic = mfspr(SPRN_IC);
svcpu->in_use = true;
+
+ svcpu_put(svcpu);
}
/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
-void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
- struct kvmppc_book3s_shadow_vcpu *svcpu)
+void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
- /*
- * vcpu_put would just call us again because in_use hasn't
- * been updated yet.
- */
- preempt_disable();
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
/*
* Maybe we were already preempted and synced the svcpu from
@@ -233,7 +231,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
svcpu->in_use = false;
out:
- preempt_enable();
+ svcpu_put(svcpu);
}
static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 6882bc94eba8..f0f5cd4d2fe7 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -84,12 +84,22 @@ static irqreturn_t xive_esc_irq(int irq, void *data)
{
struct kvm_vcpu *vcpu = data;
- /* We use the existing H_PROD mechanism to wake up the target */
- vcpu->arch.prodded = 1;
+ vcpu->arch.irq_pending = 1;
smp_mb();
if (vcpu->arch.ceded)
kvmppc_fast_vcpu_kick(vcpu);
+ /* Since we have the no-EOI flag, the interrupt is effectively
+ * disabled now. Clearing xive_esc_on means we won't bother
+ * doing so on the next entry.
+ *
+ * This also allows the entry code to know that if a PQ combination
+ * of 10 is observed while xive_esc_on is true, it means the queue
+ * contains an unprocessed escalation interrupt. We don't make use of
+ * that knowledge today but might (see comment in book3s_hv_rmhandlers.S).
+ */
+ vcpu->arch.xive_esc_on = false;
+
return IRQ_HANDLED;
}
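
The handler above leans on the XIVE ESB P/Q bits: with XIVE_IRQ_NO_EOI set, the escalation fires once and then stays latched until the entry path resets PQ to 00. A minimal user-space sketch of that two-bit state machine follows; the names are illustrative stand-ins, not the kernel's ESB accessors.

#include <stdio.h>

/* Toy model of a XIVE Event State Buffer: P = pending, Q = queued.
 * PQ = 00 lets the interrupt fire; PQ = 01 keeps it masked. */
struct esb {
        unsigned p:1, q:1;
};

/* An ESB "special load" atomically returns the old PQ pair while
 * installing a new one, like XIVE_ESB_SET_PQ_00/_01 in the patch. */
static unsigned esb_set_pq(struct esb *e, unsigned p, unsigned q)
{
        unsigned old = (e->p << 1) | e->q;

        e->p = p;
        e->q = q;
        return old;
}

int main(void)
{
        struct esb e = { 0, 0 };        /* PQ = 00: enabled */
        unsigned old;

        esb_set_pq(&e, 0, 1);           /* escalation fired: leave masked */
        old = esb_set_pq(&e, 0, 0);     /* guest entry: re-enable */
        printf("PQ before re-enable was %u%u\n", old >> 1, old & 1);
        return 0;
}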
@@ -112,19 +122,21 @@ static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
return -EIO;
}
- /*
- * Future improvement: start with them disabled
- * and handle DD2 and later scheme of merged escalation
- * interrupts
- */
- name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
- vcpu->kvm->arch.lpid, xc->server_num, prio);
+ if (xc->xive->single_escalation)
+ name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
+ vcpu->kvm->arch.lpid, xc->server_num);
+ else
+ name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
+ vcpu->kvm->arch.lpid, xc->server_num, prio);
if (!name) {
pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
prio, xc->server_num);
rc = -ENOMEM;
goto error;
}
+
+ pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);
+
rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
IRQF_NO_THREAD, name, vcpu);
if (rc) {
@@ -133,6 +145,25 @@ static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
goto error;
}
xc->esc_virq_names[prio] = name;
+
+ /* In single escalation mode, we grab the ESB MMIO of the
+ * interrupt and mask it. Also populate the VCPU v/raddr
+ * of the ESB page for use by asm entry/exit code. Finally
+ * set the XIVE_IRQ_NO_EOI flag which will prevent the
+ * core code from performing an EOI on the escalation
+ * interrupt, thus leaving it effectively masked after
+ * it fires once.
+ */
+ if (xc->xive->single_escalation) {
+ struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+
+ xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
+ vcpu->arch.xive_esc_raddr = xd->eoi_page;
+ vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
+ xd->flags |= XIVE_IRQ_NO_EOI;
+ }
+
return 0;
error:
irq_dispose_mapping(xc->esc_virq[prio]);
@@ -191,12 +222,12 @@ static int xive_check_provisioning(struct kvm *kvm, u8 prio)
pr_devel("Provisioning prio... %d\n", prio);
- /* Provision each VCPU and enable escalations */
+ /* Provision each VCPU and enable escalations if needed */
kvm_for_each_vcpu(i, vcpu, kvm) {
if (!vcpu->arch.xive_vcpu)
continue;
rc = xive_provision_queue(vcpu, prio);
- if (rc == 0)
+ if (rc == 0 && !xive->single_escalation)
xive_attach_escalation(vcpu, prio);
if (rc)
return rc;
@@ -1082,6 +1113,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
/* Allocate IPI */
xc->vp_ipi = xive_native_alloc_irq();
if (!xc->vp_ipi) {
+ pr_err("Failed to allocate xive irq for VCPU IPI\n");
r = -EIO;
goto bail;
}
@@ -1092,18 +1124,33 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
goto bail;
/*
+ * Enable the VP first as the single escalation mode will
+ * affect the numbering of escalation interrupts
+ */
+ r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
+ if (r) {
+ pr_err("Failed to enable VP in OPAL, err %d\n", r);
+ goto bail;
+ }
+
+ /*
* Initialize queues. Initially we set them all for no queueing
* and we enable escalation for queue 0 only which we'll use for
* our mfrr change notifications. If the VCPU is hot-plugged, we
- * do handle provisioning however.
+ * do, however, handle provisioning based on the existing "map"
+ * of enabled queues.
*/
for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
struct xive_q *q = &xc->queues[i];
+ /* Single escalation, no queue 7 */
+ if (i == 7 && xive->single_escalation)
+ break;
+
/* Is queue already enabled ? Provision it */
if (xive->qmap & (1 << i)) {
r = xive_provision_queue(vcpu, i);
- if (r == 0)
+ if (r == 0 && !xive->single_escalation)
xive_attach_escalation(vcpu, i);
if (r)
goto bail;
@@ -1123,11 +1170,6 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
if (r)
goto bail;
- /* Enable the VP */
- r = xive_native_enable_vp(xc->vp_id);
- if (r)
- goto bail;
-
/* Route the IPI */
r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
if (!r)
@@ -1474,6 +1516,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
pr_devel(" val=0x016%llx (server=0x%x, guest_prio=%d)\n",
val, server, guest_prio);
+
/*
* If the source doesn't already have an IPI, allocate
* one and get the corresponding data
@@ -1762,6 +1805,8 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
if (xive->vp_base == XIVE_INVALID_VP)
ret = -ENOMEM;
+ xive->single_escalation = xive_native_has_single_escalation();
+
if (ret) {
kfree(xive);
return ret;
@@ -1795,6 +1840,7 @@ static int xive_debug_show(struct seq_file *m, void *private)
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ unsigned int i;
if (!xc)
continue;
@@ -1804,6 +1850,33 @@ static int xive_debug_show(struct seq_file *m, void *private)
xc->server_num, xc->cppr, xc->hw_cppr,
xc->mfrr, xc->pending,
xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
+ for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
+ struct xive_q *q = &xc->queues[i];
+ u32 i0, i1, idx;
+
+ if (!q->qpage && !xc->esc_virq[i])
+ continue;
+
+ seq_printf(m, " [q%d]: ", i);
+
+ if (q->qpage) {
+ idx = q->idx;
+ i0 = be32_to_cpup(q->qpage + idx);
+ idx = (idx + 1) & q->msk;
+ i1 = be32_to_cpup(q->qpage + idx);
+ seq_printf(m, "T=%d %08x %08x... \n", q->toggle, i0, i1);
+ }
+ if (xc->esc_virq[i]) {
+ struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
+ struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
+ u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
+ seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
+ (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
+ (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
+ xc->esc_virq[i], pq, xd->eoi_page);
+ seq_printf(m, "\n");
+ }
+ }
t_rm_h_xirr += xc->stat_rm_h_xirr;
t_rm_h_ipoll += xc->stat_rm_h_ipoll;
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index 6ba63f8e8a61..a08ae6fd4c51 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -120,6 +120,8 @@ struct kvmppc_xive {
u32 q_order;
u32 q_page_order;
+ /* Flags */
+ u8 single_escalation;
};
#define KVMPPC_XIVE_Q_COUNT 8
@@ -201,25 +203,20 @@ static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmpp
 * is as follows.
 *
 * Guest requests for 0...6 are honored. A guest request for anything
- * higher results in a priority of 7 being applied.
- *
- * However, when XIRR is returned via H_XIRR, 7 is translated to 0xb
- * in order to match AIX expectations
+ * higher results in a priority of 6 being applied.
*
* Similar mapping is done for CPPR values
*/
static inline u8 xive_prio_from_guest(u8 prio)
{
- if (prio == 0xff || prio < 8)
+ if (prio == 0xff || prio < 6)
return prio;
- return 7;
+ return 6;
}
static inline u8 xive_prio_to_guest(u8 prio)
{
- if (prio == 0xff || prio < 7)
- return prio;
- return 0xb;
+ return prio;
}
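
With queue 7 now reserved for single escalation, the clamp above folds any guest priority of 6 or higher onto 6 and drops the old 7-to-0xb AIX translation on the way out. A stand-alone restatement of the clamp, as a sketch rather than the kernel function itself:

#include <assert.h>
#include <stdint.h>

static uint8_t prio_from_guest(uint8_t prio)
{
        if (prio == 0xff || prio < 6)
                return prio;            /* 0xff means "masked" */
        return 6;                       /* 6..0xfe collapse to 6 */
}

int main(void)
{
        assert(prio_from_guest(0xff) == 0xff);
        assert(prio_from_guest(5) == 5);
        assert(prio_from_guest(7) == 6);
        return 0;
}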
static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 83b485810aea..6038e2e7aee0 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1431,6 +1431,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
+ vcpu_load(vcpu);
+
regs->pc = vcpu->arch.pc;
regs->cr = kvmppc_get_cr(vcpu);
regs->ctr = vcpu->arch.ctr;
@@ -1452,6 +1454,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
+ vcpu_put(vcpu);
return 0;
}
@@ -1459,6 +1462,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
int i;
+ vcpu_load(vcpu);
+
vcpu->arch.pc = regs->pc;
kvmppc_set_cr(vcpu, regs->cr);
vcpu->arch.ctr = regs->ctr;
@@ -1480,6 +1485,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
+ vcpu_put(vcpu);
return 0;
}
@@ -1607,30 +1613,42 @@ int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
+ int ret;
+
+ vcpu_load(vcpu);
+
sregs->pvr = vcpu->arch.pvr;
get_sregs_base(vcpu, sregs);
get_sregs_arch206(vcpu, sregs);
- return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
+ ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
+
+ vcpu_put(vcpu);
+ return ret;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
- int ret;
+ int ret = -EINVAL;
+ vcpu_load(vcpu);
if (vcpu->arch.pvr != sregs->pvr)
- return -EINVAL;
+ goto out;
ret = set_sregs_base(vcpu, sregs);
if (ret < 0)
- return ret;
+ goto out;
ret = set_sregs_arch206(vcpu, sregs);
if (ret < 0)
- return ret;
+ goto out;
- return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
+ ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
+
+out:
+ vcpu_put(vcpu);
+ return ret;
}
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
@@ -1773,7 +1791,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
{
int r;
+ vcpu_load(vcpu);
r = kvmppc_core_vcpu_translate(vcpu, tr);
+ vcpu_put(vcpu);
return r;
}
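
The booke.c hunks all apply the same shape: bracket the ioctl body with vcpu_load()/vcpu_put() and route every early return through one out: label so the put can never be skipped. A minimal user-space sketch of that shape, with stubs standing in for the KVM helpers:

#include <stdio.h>

struct kvm_vcpu { int loaded; };

static void vcpu_load(struct kvm_vcpu *v) { v->loaded = 1; }
static void vcpu_put(struct kvm_vcpu *v)  { v->loaded = 0; }

static int example_vcpu_ioctl(struct kvm_vcpu *vcpu, void *arg)
{
        int ret = -22;                  /* -EINVAL */

        vcpu_load(vcpu);
        if (!arg)
                goto out;               /* early exits funnel through out: */
        ret = 0;
out:
        vcpu_put(vcpu);                 /* exactly one put per load */
        return ret;
}

int main(void)
{
        struct kvm_vcpu v = { 0 };

        printf("%d\n", example_vcpu_ioctl(&v, NULL));
        return 0;
}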
@@ -1996,12 +2016,15 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
{
struct debug_reg *dbg_reg;
int n, b = 0, w = 0;
+ int ret = 0;
+
+ vcpu_load(vcpu);
if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
vcpu->arch.dbg_reg.dbcr0 = 0;
vcpu->guest_debug = 0;
kvm_guest_protect_msr(vcpu, MSR_DE, false);
- return 0;
+ goto out;
}
kvm_guest_protect_msr(vcpu, MSR_DE, true);
@@ -2033,8 +2056,9 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
#endif
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
- return 0;
+ goto out;
+ ret = -EINVAL;
for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
uint64_t addr = dbg->arch.bp[n].addr;
uint32_t type = dbg->arch.bp[n].type;
@@ -2045,21 +2069,24 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
if (type & ~(KVMPPC_DEBUG_WATCH_READ |
KVMPPC_DEBUG_WATCH_WRITE |
KVMPPC_DEBUG_BREAKPOINT))
- return -EINVAL;
+ goto out;
if (type & KVMPPC_DEBUG_BREAKPOINT) {
/* Setting H/W breakpoint */
if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
- return -EINVAL;
+ goto out;
} else {
/* Setting H/W watchpoint */
if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
type, w++))
- return -EINVAL;
+ goto out;
}
}
- return 0;
+ ret = 0;
+out:
+ vcpu_put(vcpu);
+ return ret;
}
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index af833531af31..a382e15135e6 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -58,6 +58,18 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_VSX */
+#ifdef CONFIG_ALTIVEC
+static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
+{
+ if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
+ kvmppc_core_queue_vec_unavail(vcpu);
+ return true;
+ }
+
+ return false;
+}
+#endif /* CONFIG_ALTIVEC */
+
/*
* XXX to do:
* lfiwax, lfiwzx
@@ -98,6 +110,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
vcpu->arch.mmio_sp64_extend = 0;
vcpu->arch.mmio_sign_extend = 0;
+ vcpu->arch.mmio_vmx_copy_nums = 0;
switch (get_op(inst)) {
case 31:
@@ -459,6 +472,29 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
rs, 4, 1);
break;
#endif /* CONFIG_VSX */
+
+#ifdef CONFIG_ALTIVEC
+ case OP_31_XOP_LVX:
+ if (kvmppc_check_altivec_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.vaddr_accessed &= ~0xFULL;
+ vcpu->arch.paddr_accessed &= ~0xFULL;
+ vcpu->arch.mmio_vmx_copy_nums = 2;
+ emulated = kvmppc_handle_load128_by2x64(run, vcpu,
+ KVM_MMIO_REG_VMX|rt, 1);
+ break;
+
+ case OP_31_XOP_STVX:
+ if (kvmppc_check_altivec_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.vaddr_accessed &= ~0xFULL;
+ vcpu->arch.paddr_accessed &= ~0xFULL;
+ vcpu->arch.mmio_vmx_copy_nums = 2;
+ emulated = kvmppc_handle_store128_by2x64(run, vcpu,
+ rs, 1);
+ break;
+#endif /* CONFIG_ALTIVEC */
+
default:
emulated = EMULATE_FAIL;
break;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0a7c88786ec0..403e642c78f5 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -638,8 +638,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
case KVM_CAP_SPAPR_RESIZE_HPT:
- /* Disable this on POWER9 until code handles new HPTE format */
- r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
+ r = !!hv_enabled;
break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -763,7 +762,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
- vcpu->arch.dec_expires = ~(u64)0;
+ vcpu->arch.dec_expires = get_tb();
#ifdef CONFIG_KVM_EXIT_TIMING
mutex_init(&vcpu->arch.exit_timing_lock);
@@ -930,6 +929,34 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
}
#endif /* CONFIG_VSX */
+#ifdef CONFIG_ALTIVEC
+static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
+ u64 gpr)
+{
+ int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+ u32 hi, lo;
+ u32 di;
+
+#ifdef __BIG_ENDIAN
+ hi = gpr >> 32;
+ lo = gpr & 0xffffffff;
+#else
+ lo = gpr >> 32;
+ hi = gpr & 0xffffffff;
+#endif
+
+ di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */
+ if (di > 1)
+ return;
+
+ if (vcpu->arch.mmio_host_swabbed)
+ di = 1 - di;
+
+ VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
+ VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
+}
+#endif /* CONFIG_ALTIVEC */
+
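The doubleword index in kvmppc_set_vmx_dword() falls out of the remaining copy count: mmio_vmx_copy_nums counts down from 2, so the first completed access lands in doubleword 0 and the second in doubleword 1, with mmio_host_swabbed flipping the two. A hedged restatement of just that index math:

#include <stdio.h>

static int vmx_dword_index(int copies_left, int host_swabbed)
{
        int di = 2 - copies_left;       /* 2 left -> di 0, 1 left -> di 1 */

        if (di > 1)
                return -1;              /* nothing left to place */
        return host_swabbed ? 1 - di : di;
}

int main(void)
{
        printf("%d %d\n", vmx_dword_index(2, 0), vmx_dword_index(1, 0));
        return 0;
}
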
#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
@@ -1033,6 +1060,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
kvmppc_set_vsr_dword_dump(vcpu, gpr);
break;
#endif
+#ifdef CONFIG_ALTIVEC
+ case KVM_MMIO_REG_VMX:
+ kvmppc_set_vmx_dword(vcpu, gpr);
+ break;
+#endif
default:
BUG();
}
@@ -1106,11 +1138,9 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
enum emulation_result emulated = EMULATE_DONE;
- /* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */
- if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
- (vcpu->arch.mmio_vsx_copy_nums < 0) ) {
+ /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
+ if (vcpu->arch.mmio_vsx_copy_nums > 4)
return EMULATE_FAIL;
- }
while (vcpu->arch.mmio_vsx_copy_nums) {
emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
@@ -1252,11 +1282,9 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.io_gpr = rs;
- /* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */
- if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
- (vcpu->arch.mmio_vsx_copy_nums < 0) ) {
+ /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
+ if (vcpu->arch.mmio_vsx_copy_nums > 4)
return EMULATE_FAIL;
- }
while (vcpu->arch.mmio_vsx_copy_nums) {
if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
@@ -1312,6 +1340,111 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
}
#endif /* CONFIG_VSX */
+#ifdef CONFIG_ALTIVEC
+/* handle quadword load access in two halves */
+int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, int is_default_endian)
+{
+ enum emulation_result emulated;
+
+ while (vcpu->arch.mmio_vmx_copy_nums) {
+ emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
+ is_default_endian, 0);
+
+ if (emulated != EMULATE_DONE)
+ break;
+
+ vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.mmio_vmx_copy_nums--;
+ }
+
+ return emulated;
+}
+
+static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+{
+ vector128 vrs = VCPU_VSX_VR(vcpu, rs);
+ u32 di;
+ u64 w0, w1;
+
+ di = 2 - vcpu->arch.mmio_vmx_copy_nums; /* doubleword index */
+ if (di > 1)
+ return -1;
+
+ if (vcpu->arch.mmio_host_swabbed)
+ di = 1 - di;
+
+ w0 = vrs.u[di * 2];
+ w1 = vrs.u[di * 2 + 1];
+
+#ifdef __BIG_ENDIAN
+ *val = (w0 << 32) | w1;
+#else
+ *val = (w1 << 32) | w0;
+#endif
+ return 0;
+}
+
+/* handle quadword store in two halves */
+int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rs, int is_default_endian)
+{
+ u64 val = 0;
+ enum emulation_result emulated = EMULATE_DONE;
+
+ vcpu->arch.io_gpr = rs;
+
+ while (vcpu->arch.mmio_vmx_copy_nums) {
+ if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
+ return EMULATE_FAIL;
+
+ emulated = kvmppc_handle_store(run, vcpu, val, 8,
+ is_default_endian);
+ if (emulated != EMULATE_DONE)
+ break;
+
+ vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.mmio_vmx_copy_nums--;
+ }
+
+ return emulated;
+}
+
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
+ struct kvm_run *run)
+{
+ enum emulation_result emulated = EMULATE_FAIL;
+ int r;
+
+ vcpu->arch.paddr_accessed += run->mmio.len;
+
+ if (!vcpu->mmio_is_write) {
+ emulated = kvmppc_handle_load128_by2x64(run, vcpu,
+ vcpu->arch.io_gpr, 1);
+ } else {
+ emulated = kvmppc_handle_store128_by2x64(run, vcpu,
+ vcpu->arch.io_gpr, 1);
+ }
+
+ switch (emulated) {
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ r = RESUME_HOST;
+ break;
+ case EMULATE_FAIL:
+ pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ r = RESUME_HOST;
+ break;
+ default:
+ r = RESUME_GUEST;
+ break;
+ }
+ return r;
+}
+#endif /* CONFIG_ALTIVEC */
+
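kvmppc_handle_store128_by2x64() above splits one 128-bit VMX store into two 8-byte MMIO transfers, bumping paddr_accessed by run->mmio.len and counting mmio_vmx_copy_nums down between them. A user-space sketch of the same split; do_mmio_write8() is a hypothetical stand-in for the kvmppc_handle_store() path:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void do_mmio_write8(uint64_t paddr, uint64_t val)
{
        printf("mmio write8 @%#lx = %#lx\n", (unsigned long)paddr,
               (unsigned long)val);
}

static void store128_by2x64(uint64_t paddr, const uint8_t vr[16])
{
        int copies = 2;                 /* mirrors mmio_vmx_copy_nums */

        while (copies) {
                uint64_t half;

                memcpy(&half, vr + (2 - copies) * 8, 8);
                do_mmio_write8(paddr, half);
                paddr += 8;             /* paddr_accessed += mmio.len */
                copies--;
        }
}

int main(void)
{
        uint8_t vr[16] = { 1, 2, 3 };

        store128_by2x64(0x10000, vr);
        return 0;
}
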
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
int r = 0;
@@ -1413,6 +1546,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int r;
+ vcpu_load(vcpu);
+
if (vcpu->mmio_needed) {
vcpu->mmio_needed = 0;
if (!vcpu->mmio_is_write)
@@ -1427,7 +1562,19 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
if (r == RESUME_HOST) {
vcpu->mmio_needed = 1;
- return r;
+ goto out;
+ }
+ }
+#endif
+#ifdef CONFIG_ALTIVEC
+ if (vcpu->arch.mmio_vmx_copy_nums > 0)
+ vcpu->arch.mmio_vmx_copy_nums--;
+
+ if (vcpu->arch.mmio_vmx_copy_nums > 0) {
+ r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+ if (r == RESUME_HOST) {
+ vcpu->mmio_needed = 1;
+ goto out;
}
}
#endif
@@ -1461,6 +1608,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_sigset_deactivate(vcpu);
+out:
+ vcpu_put(vcpu);
return r;
}
@@ -1608,23 +1757,31 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
return -EINVAL;
}
-long kvm_arch_vcpu_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
- long r;
- switch (ioctl) {
- case KVM_INTERRUPT: {
+ if (ioctl == KVM_INTERRUPT) {
struct kvm_interrupt irq;
- r = -EFAULT;
if (copy_from_user(&irq, argp, sizeof(irq)))
- goto out;
- r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
- goto out;
+ return -EFAULT;
+ return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
}
+ return -ENOIOCTLCMD;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ long r;
+
+ vcpu_load(vcpu);
+ switch (ioctl) {
case KVM_ENABLE_CAP:
{
struct kvm_enable_cap cap;
@@ -1664,6 +1821,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
out:
+ vcpu_put(vcpu);
return r;
}
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index e44d2b2ea97e..1c03c978eb18 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -143,8 +143,7 @@ static int kvmppc_exit_timing_show(struct seq_file *m, void *private)
int i;
u64 min, max, sum, sum_quad;
- seq_printf(m, "%s", "type count min max sum sum_squared\n");
-
+ seq_puts(m, "type count min max sum sum_squared\n");
for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) {
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index ebc244b08d67..d22aeb0b69e1 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -42,6 +42,7 @@ static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
+static bool xive_has_single_esc;
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
@@ -571,6 +572,10 @@ bool __init xive_native_init(void)
break;
}
+ /* Do we support single escalation */
+ if (of_get_property(np, "single-escalation-support", NULL) != NULL)
+ xive_has_single_esc = true;
+
/* Configure Thread Management areas for KVM */
for_each_possible_cpu(cpu)
kvmppc_set_xive_tima(cpu, r.start, tima);
@@ -667,12 +672,15 @@ void xive_native_free_vp_block(u32 vp_base)
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);
-int xive_native_enable_vp(u32 vp_id)
+int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
s64 rc;
+ u64 flags = OPAL_XIVE_VP_ENABLED;
+ if (single_escalation)
+ flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
for (;;) {
- rc = opal_xive_set_vp_info(vp_id, OPAL_XIVE_VP_ENABLED, 0);
+ rc = opal_xive_set_vp_info(vp_id, flags, 0);
if (rc != OPAL_BUSY)
break;
msleep(1);
@@ -710,3 +718,9 @@ int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);
+
+bool xive_native_has_single_escalation(void)
+{
+ return xive_has_single_esc;
+}
+EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);
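
xive_native_enable_vp() keeps the existing OPAL_BUSY loop and only widens the flags it passes. The loop itself is a common OPAL idiom: retry with a short sleep until the firmware call stops reporting busy. A generic sketch, with opal_call() as a made-up stand-in that succeeds on the third attempt:

#include <stdio.h>

#define OPAL_BUSY       -2L
#define OPAL_SUCCESS    0L

static int calls;

static long opal_call(void)
{
        return ++calls < 3 ? OPAL_BUSY : OPAL_SUCCESS;
}

int main(void)
{
        long rc;

        for (;;) {
                rc = opal_call();
                if (rc != OPAL_BUSY)
                        break;
                /* kernel: msleep(1) here before retrying */
        }
        printf("rc=%ld after %d calls\n", rc, calls);
        return 0;
}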
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 0105ce28e246..eaee7087886f 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -112,7 +112,6 @@ config S390
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
- select ARCH_WANTS_PROT_NUMA_PROT_NONE
select ARCH_WANTS_UBSAN_NO_NULL
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
@@ -540,6 +539,51 @@ config ARCH_RANDOM
If unsure, say Y.
+config KERNEL_NOBP
+ def_bool n
+ prompt "Enable modified branch prediction for the kernel by default"
+ help
+ If this option is selected the kernel will switch to a modified
+ branch prediction mode if the firmware interface is available.
+ The modified branch prediction mode improves the behaviour in
+ regard to speculative execution.
+
+ With the option enabled the kernel parameter "nobp=0" or "nospec"
+ can be used to run the kernel in the normal branch prediction mode.
+
+ With the option disabled the modified branch prediction mode is
+ enabled with the "nobp=1" kernel parameter.
+
+ If unsure, say N.
+
+config EXPOLINE
+ def_bool n
+ prompt "Avoid speculative indirect branches in the kernel"
+ help
+ Compile the kernel with the expoline compiler options to guard
+ against kernel-to-user data leaks by avoiding speculative indirect
+ branches.
+ Requires a compiler with -mindirect-branch=thunk support for full
+ protection. The kernel may run slower.
+
+ If unsure, say N.
+
+choice
+ prompt "Expoline default"
+ depends on EXPOLINE
+ default EXPOLINE_FULL
+
+config EXPOLINE_OFF
+ bool "spectre_v2=off"
+
+config EXPOLINE_MEDIUM
+ bool "spectre_v2=auto"
+
+config EXPOLINE_FULL
+ bool "spectre_v2=on"
+
+endchoice
+
endmenu
menu "Memory setup"
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index fd691c4ff89e..2ced3239cb84 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -78,6 +78,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
endif
+ifdef CONFIG_EXPOLINE
+ ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
+ CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
+ CC_FLAGS_EXPOLINE += -mfunction-return=thunk
+ CC_FLAGS_EXPOLINE += -mindirect-branch-table
+ export CC_FLAGS_EXPOLINE
+ cflags-y += $(CC_FLAGS_EXPOLINE)
+ endif
+endif
+
ifdef CONFIG_FUNCTION_TRACER
# make use of hotpatch feature if the compiler supports it
cc_hotpatch := -mhotpatch=0,3
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 10432607a573..f9eddbca79d2 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -49,6 +49,30 @@ do { \
#define __smp_mb__before_atomic() barrier()
#define __smp_mb__after_atomic() barrier()
+/**
+ * array_index_mask_nospec - generate a mask for array_idx() that is
+ * ~0UL when the bounds check succeeds and 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+{
+ unsigned long mask;
+
+ if (__builtin_constant_p(size) && size > 0) {
+ asm(" clgr %2,%1\n"
+ " slbgr %0,%0\n"
+ :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
+ return mask;
+ }
+ asm(" clgr %1,%2\n"
+ " slbgr %0,%0\n"
+ :"=d" (mask) : "d" (size), "d" (index) :"cc");
+ return ~mask;
+}
+
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */
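
The inline asm above computes, without a branch, a mask that is ~0UL when index < size and 0UL otherwise, so a possibly-mispredicted index can be clamped before it is used to load from an array. A portable C sketch of the same idea, mirroring the generic fallback rather than the s390 clgr/slbgr sequence:

#include <stdio.h>

static unsigned long index_mask(unsigned long index, unsigned long size)
{
        /* The sign bit of (index | (size - 1 - index)) is clear iff
         * index < size; an arithmetic shift smears it into a mask. */
        return (unsigned long)(~(long)(index | (size - 1 - index)) >>
                               (sizeof(long) * 8 - 1));
}

int main(void)
{
        unsigned long size = 16, idx = 20;

        idx &= index_mask(idx, size);   /* out-of-bounds clamps to 0 */
        printf("%lu\n", idx);
        return 0;
}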
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 31e400c1a1f3..86e5b2fdee3c 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -261,6 +261,11 @@ static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
+static inline int test_and_clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
+{
+ return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
+}
+
static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
{
return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h
index a478eb61aaf7..fb56fa3283a2 100644
--- a/arch/s390/include/asm/css_chars.h
+++ b/arch/s390/include/asm/css_chars.h
@@ -20,7 +20,9 @@ struct css_general_char {
u32 aif_tdd : 1; /* bit 56 */
u32 : 1;
u32 qebsm : 1; /* bit 58 */
- u32 : 8;
+ u32 : 2;
+ u32 aiv : 1; /* bit 61 */
+ u32 : 5;
u32 aif_osa : 1; /* bit 67 */
u32 : 12;
u32 eadm_rf : 1; /* bit 80 */
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
index eb5323161f11..bb63b2afdf6f 100644
--- a/arch/s390/include/asm/eadm.h
+++ b/arch/s390/include/asm/eadm.h
@@ -4,7 +4,7 @@
#include <linux/types.h>
#include <linux/device.h>
-#include <linux/blkdev.h>
+#include <linux/blk_types.h>
struct arqb {
u64 data;
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index fbe0c4be3cd8..99c8ce30b3cd 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -15,6 +15,24 @@
#define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8)
+static inline void __set_facility(unsigned long nr, void *facilities)
+{
+ unsigned char *ptr = (unsigned char *) facilities;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return;
+ ptr[nr >> 3] |= 0x80 >> (nr & 7);
+}
+
+static inline void __clear_facility(unsigned long nr, void *facilities)
+{
+ unsigned char *ptr = (unsigned char *) facilities;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return;
+ ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
+}
+
static inline int __test_facility(unsigned long nr, void *facilities)
{
unsigned char *ptr;
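
The new helpers mirror __test_facility(): facility bits are numbered big-endian within each byte, so bit nr lives in byte nr >> 3 under the mask 0x80 >> (nr & 7). A stand-alone sketch of that layout; facility 82 is the branch-prediction control facility the nobp logic keys on:

#include <stdio.h>

static void set_facility(unsigned long nr, unsigned char *fac)
{
        fac[nr >> 3] |= 0x80 >> (nr & 7);
}

static int test_facility_bit(unsigned long nr, const unsigned char *fac)
{
        return !!(fac[nr >> 3] & (0x80 >> (nr & 7)));
}

int main(void)
{
        unsigned char fac[16] = { 0 };

        set_facility(82, fac);
        printf("facility 82: %d\n", test_facility_bit(82, fac));
        return 0;
}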
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index c1b0a9ac1dc8..afb0f08b8021 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -2,7 +2,7 @@
/*
* definition for kernel virtual machines on s390
*
- * Copyright IBM Corp. 2008, 2009
+ * Copyright IBM Corp. 2008, 2018
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
*/
@@ -183,6 +183,7 @@ struct kvm_s390_sie_block {
#define ECA_IB 0x40000000
#define ECA_SIGPI 0x10000000
#define ECA_MVPGI 0x01000000
+#define ECA_AIV 0x00200000
#define ECA_VX 0x00020000
#define ECA_PROTEXCI 0x00002000
#define ECA_SII 0x00000001
@@ -228,7 +229,9 @@ struct kvm_s390_sie_block {
__u8 epdx; /* 0x0069 */
__u8 reserved6a[2]; /* 0x006a */
__u32 todpr; /* 0x006c */
- __u8 reserved70[16]; /* 0x0070 */
+#define GISA_FORMAT1 0x00000001
+ __u32 gd; /* 0x0070 */
+ __u8 reserved74[12]; /* 0x0074 */
__u64 mso; /* 0x0080 */
__u64 msl; /* 0x0088 */
psw_t gpsw; /* 0x0090 */
@@ -317,18 +320,30 @@ struct kvm_vcpu_stat {
u64 deliver_program_int;
u64 deliver_io_int;
u64 exit_wait_state;
+ u64 instruction_epsw;
+ u64 instruction_gs;
+ u64 instruction_io_other;
+ u64 instruction_lpsw;
+ u64 instruction_lpswe;
u64 instruction_pfmf;
+ u64 instruction_ptff;
+ u64 instruction_sck;
+ u64 instruction_sckpf;
u64 instruction_stidp;
u64 instruction_spx;
u64 instruction_stpx;
u64 instruction_stap;
- u64 instruction_storage_key;
+ u64 instruction_iske;
+ u64 instruction_ri;
+ u64 instruction_rrbe;
+ u64 instruction_sske;
u64 instruction_ipte_interlock;
- u64 instruction_stsch;
- u64 instruction_chsc;
u64 instruction_stsi;
u64 instruction_stfl;
+ u64 instruction_tb;
+ u64 instruction_tpi;
u64 instruction_tprot;
+ u64 instruction_tsch;
u64 instruction_sie;
u64 instruction_essa;
u64 instruction_sthyi;
@@ -354,6 +369,7 @@ struct kvm_vcpu_stat {
u64 diagnose_258;
u64 diagnose_308;
u64 diagnose_500;
+ u64 diagnose_other;
};
#define PGM_OPERATION 0x01
@@ -410,35 +426,35 @@ struct kvm_vcpu_stat {
#define PGM_PER 0x80
#define PGM_CRYPTO_OPERATION 0x119
-/* irq types in order of priority */
+/* irq types in ascending order of priority */
enum irq_types {
- IRQ_PEND_MCHK_EX = 0,
- IRQ_PEND_SVC,
- IRQ_PEND_PROG,
- IRQ_PEND_MCHK_REP,
- IRQ_PEND_EXT_IRQ_KEY,
- IRQ_PEND_EXT_MALFUNC,
- IRQ_PEND_EXT_EMERGENCY,
- IRQ_PEND_EXT_EXTERNAL,
- IRQ_PEND_EXT_CLOCK_COMP,
- IRQ_PEND_EXT_CPU_TIMER,
- IRQ_PEND_EXT_TIMING,
- IRQ_PEND_EXT_SERVICE,
- IRQ_PEND_EXT_HOST,
- IRQ_PEND_PFAULT_INIT,
- IRQ_PEND_PFAULT_DONE,
- IRQ_PEND_VIRTIO,
- IRQ_PEND_IO_ISC_0,
- IRQ_PEND_IO_ISC_1,
- IRQ_PEND_IO_ISC_2,
- IRQ_PEND_IO_ISC_3,
- IRQ_PEND_IO_ISC_4,
- IRQ_PEND_IO_ISC_5,
- IRQ_PEND_IO_ISC_6,
- IRQ_PEND_IO_ISC_7,
- IRQ_PEND_SIGP_STOP,
+ IRQ_PEND_SET_PREFIX = 0,
IRQ_PEND_RESTART,
- IRQ_PEND_SET_PREFIX,
+ IRQ_PEND_SIGP_STOP,
+ IRQ_PEND_IO_ISC_7,
+ IRQ_PEND_IO_ISC_6,
+ IRQ_PEND_IO_ISC_5,
+ IRQ_PEND_IO_ISC_4,
+ IRQ_PEND_IO_ISC_3,
+ IRQ_PEND_IO_ISC_2,
+ IRQ_PEND_IO_ISC_1,
+ IRQ_PEND_IO_ISC_0,
+ IRQ_PEND_VIRTIO,
+ IRQ_PEND_PFAULT_DONE,
+ IRQ_PEND_PFAULT_INIT,
+ IRQ_PEND_EXT_HOST,
+ IRQ_PEND_EXT_SERVICE,
+ IRQ_PEND_EXT_TIMING,
+ IRQ_PEND_EXT_CPU_TIMER,
+ IRQ_PEND_EXT_CLOCK_COMP,
+ IRQ_PEND_EXT_EXTERNAL,
+ IRQ_PEND_EXT_EMERGENCY,
+ IRQ_PEND_EXT_MALFUNC,
+ IRQ_PEND_EXT_IRQ_KEY,
+ IRQ_PEND_MCHK_REP,
+ IRQ_PEND_PROG,
+ IRQ_PEND_SVC,
+ IRQ_PEND_MCHK_EX,
IRQ_PEND_COUNT
};
@@ -516,9 +532,6 @@ struct kvm_s390_irq_payload {
struct kvm_s390_local_interrupt {
spinlock_t lock;
- struct kvm_s390_float_interrupt *float_int;
- struct swait_queue_head *wq;
- atomic_t *cpuflags;
DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
struct kvm_s390_irq_payload irq;
unsigned long pending_irqs;
@@ -707,14 +720,50 @@ struct kvm_s390_crypto_cb {
struct kvm_s390_apcb1 apcb1; /* 0x0080 */
};
+struct kvm_s390_gisa {
+ union {
+ struct { /* common to all formats */
+ u32 next_alert;
+ u8 ipm;
+ u8 reserved01[2];
+ u8 iam;
+ };
+ struct { /* format 0 */
+ u32 next_alert;
+ u8 ipm;
+ u8 reserved01;
+ u8 : 6;
+ u8 g : 1;
+ u8 c : 1;
+ u8 iam;
+ u8 reserved02[4];
+ u32 airq_count;
+ } g0;
+ struct { /* format 1 */
+ u32 next_alert;
+ u8 ipm;
+ u8 simm;
+ u8 nimm;
+ u8 iam;
+ u8 aism[8];
+ u8 : 6;
+ u8 g : 1;
+ u8 c : 1;
+ u8 reserved03[11];
+ u32 airq_count;
+ } g1;
+ };
+};
+
/*
- * sie_page2 has to be allocated as DMA because fac_list and crycb need
- * 31bit addresses in the sie control block.
+ * sie_page2 has to be allocated as DMA because fac_list, crycb and
+ * gisa need 31bit addresses in the sie control block.
*/
struct sie_page2 {
__u64 fac_list[S390_ARCH_FAC_LIST_SIZE_U64]; /* 0x0000 */
struct kvm_s390_crypto_cb crycb; /* 0x0800 */
- u8 reserved900[0x1000 - 0x900]; /* 0x0900 */
+ struct kvm_s390_gisa gisa; /* 0x0900 */
+ u8 reserved920[0x1000 - 0x920]; /* 0x0920 */
};
struct kvm_s390_vsie {
@@ -761,6 +810,7 @@ struct kvm_arch{
struct kvm_s390_migration_state *migration_state;
/* subset of available cpu features enabled by user space */
DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
+ struct kvm_s390_gisa *gisa;
};
#define KVM_HVA_ERR_BAD (-1UL)
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index ec6592e8ba36..5bc888841eaf 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -136,7 +136,11 @@ struct lowcore {
__u64 vdso_per_cpu_data; /* 0x03b8 */
__u64 machine_flags; /* 0x03c0 */
__u64 gmap; /* 0x03c8 */
- __u8 pad_0x03d0[0x0e00-0x03d0]; /* 0x03d0 */
+ __u8 pad_0x03d0[0x0400-0x03d0]; /* 0x03d0 */
+
+ /* br %r1 trampoline */
+ __u16 br_r1_trampoline; /* 0x0400 */
+ __u8 pad_0x0402[0x0e00-0x0402]; /* 0x0402 */
/*
* 0xe00 contains the address of the IPL Parameter Information
@@ -151,7 +155,8 @@ struct lowcore {
__u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
/* Extended facility list */
- __u64 stfle_fac_list[32]; /* 0x0f00 */
+ __u64 stfle_fac_list[16]; /* 0x0f00 */
+ __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
__u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
/* Pointer to the machine check extended save area */
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
new file mode 100644
index 000000000000..7df48e5cf36f
--- /dev/null
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_EXPOLINE_H
+#define _ASM_S390_EXPOLINE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+extern int nospec_call_disable;
+extern int nospec_return_disable;
+
+void nospec_init_branches(void);
+void nospec_call_revert(s32 *start, s32 *end);
+void nospec_return_revert(s32 *start, s32 *end);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index bfbfad482289..7f2953c15c37 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -91,6 +91,7 @@ void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern int sysctl_ieee_emulation_warnings;
extern void execve_tail(void);
+extern void __bpon(void);
/*
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -377,6 +378,9 @@ extern void memcpy_absolute(void *, void *, size_t);
memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
} while (0)
+extern int s390_isolate_bp(void);
+extern int s390_isolate_bp_guest(void);
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/runtime_instr.h b/arch/s390/include/asm/runtime_instr.h
index 6b1540337ed6..0e1605538cd4 100644
--- a/arch/s390/include/asm/runtime_instr.h
+++ b/arch/s390/include/asm/runtime_instr.h
@@ -2,75 +2,10 @@
#ifndef _RUNTIME_INSTR_H
#define _RUNTIME_INSTR_H
-#define S390_RUNTIME_INSTR_START 0x1
-#define S390_RUNTIME_INSTR_STOP 0x2
-
-struct runtime_instr_cb {
- __u64 rca;
- __u64 roa;
- __u64 rla;
-
- __u32 v : 1;
- __u32 s : 1;
- __u32 k : 1;
- __u32 h : 1;
- __u32 a : 1;
- __u32 reserved1 : 3;
- __u32 ps : 1;
- __u32 qs : 1;
- __u32 pc : 1;
- __u32 qc : 1;
- __u32 reserved2 : 1;
- __u32 g : 1;
- __u32 u : 1;
- __u32 l : 1;
- __u32 key : 4;
- __u32 reserved3 : 8;
- __u32 t : 1;
- __u32 rgs : 3;
-
- __u32 m : 4;
- __u32 n : 1;
- __u32 mae : 1;
- __u32 reserved4 : 2;
- __u32 c : 1;
- __u32 r : 1;
- __u32 b : 1;
- __u32 j : 1;
- __u32 e : 1;
- __u32 x : 1;
- __u32 reserved5 : 2;
- __u32 bpxn : 1;
- __u32 bpxt : 1;
- __u32 bpti : 1;
- __u32 bpni : 1;
- __u32 reserved6 : 2;
-
- __u32 d : 1;
- __u32 f : 1;
- __u32 ic : 4;
- __u32 dc : 4;
-
- __u64 reserved7;
- __u64 sf;
- __u64 rsic;
- __u64 reserved8;
-} __packed __aligned(8);
+#include <uapi/asm/runtime_instr.h>
extern struct runtime_instr_cb runtime_instr_empty_cb;
-static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb)
-{
- asm volatile(".insn rsy,0xeb0000000060,0,0,%0" /* LRIC */
- : : "Q" (*cb));
-}
-
-static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
-{
- asm volatile(".insn rsy,0xeb0000000061,0,0,%0" /* STRIC */
- : "=Q" (*cb) : : "cc");
-}
-
static inline void save_ri_cb(struct runtime_instr_cb *cb_prev)
{
if (cb_prev)
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index d3c1a8a2e3ad..3cae9168f63c 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -77,6 +77,7 @@ struct sclp_info {
unsigned char has_ibs : 1;
unsigned char has_skey : 1;
unsigned char has_kss : 1;
+ unsigned char has_gisaf : 1;
unsigned int ibc;
unsigned int mtid;
unsigned int mtid_cp;
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 25057c118d56..fe7b3f8f0791 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -21,7 +21,8 @@ struct sysinfo_1_1_1 {
unsigned char :8;
unsigned char ccr;
unsigned char cai;
- char reserved_0[28];
+ char reserved_0[20];
+ unsigned long lic;
char manufacturer[16];
char type[4];
char reserved_1[12];
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 25d6ec3aaddd..83ba57533ce6 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -58,6 +58,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */
#define TIF_PATCH_PENDING 5 /* pending live patching update */
#define TIF_PGSTE 6 /* New mm's will use 4K page tables */
+#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
+#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
#define TIF_31BIT 16 /* 32bit process */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
@@ -78,6 +80,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#define _TIF_UPROBE _BITUL(TIF_UPROBE)
#define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE)
#define _TIF_PATCH_PENDING _BITUL(TIF_PATCH_PENDING)
+#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
+#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
#define _TIF_31BIT _BITUL(TIF_31BIT)
#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
diff --git a/arch/s390/include/uapi/asm/runtime_instr.h b/arch/s390/include/uapi/asm/runtime_instr.h
new file mode 100644
index 000000000000..45c9ec984e6b
--- /dev/null
+++ b/arch/s390/include/uapi/asm/runtime_instr.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _S390_UAPI_RUNTIME_INSTR_H
+#define _S390_UAPI_RUNTIME_INSTR_H
+
+#include <linux/types.h>
+
+#define S390_RUNTIME_INSTR_START 0x1
+#define S390_RUNTIME_INSTR_STOP 0x2
+
+struct runtime_instr_cb {
+ __u64 rca;
+ __u64 roa;
+ __u64 rla;
+
+ __u32 v : 1;
+ __u32 s : 1;
+ __u32 k : 1;
+ __u32 h : 1;
+ __u32 a : 1;
+ __u32 reserved1 : 3;
+ __u32 ps : 1;
+ __u32 qs : 1;
+ __u32 pc : 1;
+ __u32 qc : 1;
+ __u32 reserved2 : 1;
+ __u32 g : 1;
+ __u32 u : 1;
+ __u32 l : 1;
+ __u32 key : 4;
+ __u32 reserved3 : 8;
+ __u32 t : 1;
+ __u32 rgs : 3;
+
+ __u32 m : 4;
+ __u32 n : 1;
+ __u32 mae : 1;
+ __u32 reserved4 : 2;
+ __u32 c : 1;
+ __u32 r : 1;
+ __u32 b : 1;
+ __u32 j : 1;
+ __u32 e : 1;
+ __u32 x : 1;
+ __u32 reserved5 : 2;
+ __u32 bpxn : 1;
+ __u32 bpxt : 1;
+ __u32 bpti : 1;
+ __u32 bpni : 1;
+ __u32 reserved6 : 2;
+
+ __u32 d : 1;
+ __u32 f : 1;
+ __u32 ic : 4;
+ __u32 dc : 4;
+
+ __u64 reserved7;
+ __u64 sf;
+ __u64 rsic;
+ __u64 reserved8;
+} __packed __aligned(8);
+
+static inline void load_runtime_instr_cb(struct runtime_instr_cb *cb)
+{
+ asm volatile(".insn rsy,0xeb0000000060,0,0,%0" /* LRIC */
+ : : "Q" (*cb));
+}
+
+static inline void store_runtime_instr_cb(struct runtime_instr_cb *cb)
+{
+ asm volatile(".insn rsy,0xeb0000000061,0,0,%0" /* STRIC */
+ : "=Q" (*cb) : : "cc");
+}
+
+#endif /* _S390_UAPI_RUNTIME_INSTR_H */
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 909bce65cb2b..7f27e3da9709 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -29,6 +29,7 @@ UBSAN_SANITIZE_early.o := n
#
ifneq ($(CC_FLAGS_MARCH),-march=z900)
CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
+CFLAGS_REMOVE_als.o += $(CC_FLAGS_EXPOLINE)
CFLAGS_als.o += -march=z900
AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
AFLAGS_head.o += -march=z900
@@ -63,6 +64,9 @@ obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
extra-y += head.o head64.o vmlinux.lds
+obj-$(CONFIG_EXPOLINE) += nospec-branch.o
+CFLAGS_REMOVE_expoline.o += $(CC_FLAGS_EXPOLINE)
+
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
index 574e77622c04..22476135f738 100644
--- a/arch/s390/kernel/alternative.c
+++ b/arch/s390/kernel/alternative.c
@@ -15,6 +15,29 @@ static int __init disable_alternative_instructions(char *str)
early_param("noaltinstr", disable_alternative_instructions);
+static int __init nobp_setup_early(char *str)
+{
+ bool enabled;
+ int rc;
+
+ rc = kstrtobool(str, &enabled);
+ if (rc)
+ return rc;
+ if (enabled && test_facility(82))
+ __set_facility(82, S390_lowcore.alt_stfle_fac_list);
+ else
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ return 0;
+}
+early_param("nobp", nobp_setup_early);
+
+static int __init nospec_setup_early(char *str)
+{
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ return 0;
+}
+early_param("nospec", nospec_setup_early);
+
struct brcl_insn {
u16 opc;
s32 disp;
@@ -75,7 +98,8 @@ static void __init_or_module __apply_alternatives(struct alt_instr *start,
instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;
- if (!test_facility(a->facility))
+ if (!__test_facility(a->facility,
+ S390_lowcore.alt_stfle_fac_list))
continue;
if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 497a92047591..ac707a9f729e 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -193,6 +193,11 @@ static noinline __init void setup_facility_list(void)
{
stfle(S390_lowcore.stfle_fac_list,
ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+ memcpy(S390_lowcore.alt_stfle_fac_list,
+ S390_lowcore.stfle_fac_list,
+ sizeof(S390_lowcore.alt_stfle_fac_list));
+ if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
}
static __init void detect_diag9c(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 6cd444d25545..13a133a6015c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -107,6 +107,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 3f
1: UPDATE_VTIME %r14,%r15,\timer
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
2: lg %r15,__LC_ASYNC_STACK # load async stack
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
@@ -159,6 +160,130 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
tm off+\addr, \mask
.endm
+ .macro BPOFF
+ .pushsection .altinstr_replacement, "ax"
+660: .long 0xb2e8c000
+ .popsection
+661: .long 0x47000000
+ .pushsection .altinstructions, "a"
+ .long 661b - .
+ .long 660b - .
+ .word 82
+ .byte 4
+ .byte 4
+ .popsection
+ .endm
+
+ .macro BPON
+ .pushsection .altinstr_replacement, "ax"
+662: .long 0xb2e8d000
+ .popsection
+663: .long 0x47000000
+ .pushsection .altinstructions, "a"
+ .long 663b - .
+ .long 662b - .
+ .word 82
+ .byte 4
+ .byte 4
+ .popsection
+ .endm
+
+ .macro BPENTER tif_ptr,tif_mask
+ .pushsection .altinstr_replacement, "ax"
+662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop
+ .word 0xc004, 0x0000, 0x0000 # 6 byte nop
+ .popsection
+664: TSTMSK \tif_ptr,\tif_mask
+ jz . + 8
+ .long 0xb2e8d000
+ .pushsection .altinstructions, "a"
+ .long 664b - .
+ .long 662b - .
+ .word 82
+ .byte 12
+ .byte 12
+ .popsection
+ .endm
+
+ .macro BPEXIT tif_ptr,tif_mask
+ TSTMSK \tif_ptr,\tif_mask
+ .pushsection .altinstr_replacement, "ax"
+662: jnz . + 8
+ .long 0xb2e8d000
+ .popsection
+664: jz . + 8
+ .long 0xb2e8c000
+ .pushsection .altinstructions, "a"
+ .long 664b - .
+ .long 662b - .
+ .word 82
+ .byte 8
+ .byte 8
+ .popsection
+ .endm
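
All four macros above encode the same .altinstructions pattern: the inline code at the 66x label is the default, the replacement lives in .altinstr_replacement, and a table entry (site offset, replacement offset, facility number 82, both lengths) lets boot code patch the site only when the facility is installed. A rough user-space model of that table walk; real patching rewrites live instruction bytes:

#include <stdio.h>
#include <string.h>

struct alt_instr {
        int instr_offset;               /* simplified to array indices */
        int repl_offset;
        unsigned short facility;
        unsigned char instrlen, replacementlen;
};

static unsigned char text[8] = "\x47\x00\x00\x00";      /* default nop */
static unsigned char repl[8] = "\xb2\xe8\xc0\x00";      /* replacement */
static struct alt_instr table[] = { { 0, 0, 82, 4, 4 } };

static int facility_installed(unsigned short nr)
{
        return nr == 82;                /* pretend facility 82 exists */
}

int main(void)
{
        for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
                if (!facility_installed(table[i].facility))
                        continue;       /* leave the default in place */
                memcpy(text + table[i].instr_offset,
                       repl + table[i].repl_offset, table[i].instrlen);
        }
        printf("patched site: %02x%02x\n", text[0], text[1]);
        return 0;
}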
+
+#ifdef CONFIG_EXPOLINE
+
+ .macro GEN_BR_THUNK name,reg,tmp
+ .section .text.\name,"axG",@progbits,\name,comdat
+ .globl \name
+ .hidden \name
+ .type \name,@function
+\name:
+ .cfi_startproc
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+ exrl 0,0f
+#else
+ larl \tmp,0f
+ ex 0,0(\tmp)
+#endif
+ j .
+0: br \reg
+ .cfi_endproc
+ .endm
+
+ GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
+ GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
+ GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
+
+ .macro BASR_R14_R9
+0: brasl %r14,__s390x_indirect_jump_r1use_r9
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 0b-.
+ .popsection
+ .endm
+
+ .macro BR_R1USE_R14
+0: jg __s390x_indirect_jump_r1use_r14
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 0b-.
+ .popsection
+ .endm
+
+ .macro BR_R11USE_R14
+0: jg __s390x_indirect_jump_r11use_r14
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 0b-.
+ .popsection
+ .endm
+
+#else /* CONFIG_EXPOLINE */
+
+ .macro BASR_R14_R9
+ basr %r14,%r9
+ .endm
+
+ .macro BR_R1USE_R14
+ br %r14
+ .endm
+
+ .macro BR_R11USE_R14
+ br %r14
+ .endm
+
+#endif /* CONFIG_EXPOLINE */
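
Every thunked branch records its own offset in .s390_indirect_branches; that table is what nospec_call_revert()/nospec_return_revert() from nospec-branch.h walk when the expoline default is relaxed at boot. A user-space model of the bookkeeping, tracking a flag per site instead of rewriting code:

#include <stdio.h>

#define MAX_SITES 8

struct patch_site {
        long offset;
        int thunked;
};

static struct patch_site sites[MAX_SITES];
static int nr_sites;

static void record_site(long offset)    /* the .long 0b-. table entry */
{
        sites[nr_sites].offset = offset;
        sites[nr_sites].thunked = 1;
        nr_sites++;
}

static void nospec_revert_all(void)
{
        for (int i = 0; i < nr_sites; i++)
                sites[i].thunked = 0;   /* back to a plain br/basr */
}

int main(void)
{
        record_site(0x100);
        record_site(0x1f8);
        nospec_revert_all();
        printf("%d sites, first thunked=%d\n", nr_sites, sites[0].thunked);
        return 0;
}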
+
+
.section .kprobes.text, "ax"
.Ldummy:
/*
@@ -171,6 +296,11 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
*/
nop 0
+ENTRY(__bpon)
+ .globl __bpon
+ BPON
+ BR_R1USE_R14
+
/*
* Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev
@@ -193,9 +323,9 @@ ENTRY(__switch_to)
mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
- bzr %r14
+ jz 0f
.insn s,0xb2800000,__LC_LPP # set program parameter
- br %r14
+0: BR_R1USE_R14
.L__critical_start:
@@ -207,9 +337,11 @@ ENTRY(__switch_to)
*/
ENTRY(sie64a)
stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
+ lg %r12,__LC_CURRENT
stg %r2,__SF_EMPTY(%r15) # save control block pointer
stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
+ mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
jno .Lsie_load_guest_gprs
brasl %r14,load_fpu_regs # load guest fp/vx regs
@@ -226,8 +358,12 @@ ENTRY(sie64a)
jnz .Lsie_skip
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lsie_skip # exit if fp/vx regs changed
+ BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
sie 0(%r14)
+.Lsie_exit:
+ BPOFF
+ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
@@ -248,9 +384,15 @@ ENTRY(sie64a)
sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ xgr %r0,%r0 # clear guest registers to
+ xgr %r1,%r1 # prevent speculative use
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
- br %r14
+ BR_R1USE_R14
.Lsie_fault:
lghi %r14,-EFAULT
stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
@@ -273,6 +415,7 @@ ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ BPOFF
lg %r12,__LC_CURRENT
lghi %r13,__TASK_thread
lghi %r14,_PIF_SYSCALL
@@ -281,7 +424,10 @@ ENTRY(system_call)
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
.Lsysc_vtime:
UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled register to prevent speculative use
+ xgr %r0,%r0
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
@@ -305,7 +451,7 @@ ENTRY(system_call)
lgf %r9,0(%r8,%r10) # get system call add.
TSTMSK __TI_flags(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
- basr %r14,%r9 # call sys_xxxx
+ BASR_R14_R9 # call sys_xxxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_return:
@@ -317,6 +463,7 @@ ENTRY(system_call)
jnz .Lsysc_work # check for work
TSTMSK __LC_CPU_FLAGS,_CIF_WORK
jnz .Lsysc_work
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
@@ -489,7 +636,7 @@ ENTRY(system_call)
lmg %r3,%r7,__PT_R3(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lg %r2,__PT_ORIG_GPR2(%r11)
- basr %r14,%r9 # call sys_xxx
+ BASR_R14_R9 # call sys_xxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_tracenogo:
TSTMSK __TI_flags(%r12),_TIF_TRACE
@@ -513,7 +660,7 @@ ENTRY(ret_from_fork)
lmg %r9,%r10,__PT_R9(%r11) # load gprs
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
- basr %r14,%r9
+ BASR_R14_R9
j .Lsysc_tracenogo
/*
@@ -522,6 +669,7 @@ ENTRY(kernel_thread_starter)
ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_CURRENT
@@ -550,6 +698,7 @@ ENTRY(pgm_check_handler)
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 4f
2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lg %r15,__LC_KERNEL_STACK
lgr %r14,%r12
aghi %r14,__TASK_thread # pointer to thread_struct
@@ -561,6 +710,15 @@ ENTRY(pgm_check_handler)
4: lgr %r13,%r11
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
@@ -582,9 +740,9 @@ ENTRY(pgm_check_handler)
nill %r10,0x007f
sll %r10,2
je .Lpgm_return
- lgf %r1,0(%r10,%r1) # load address of handler routine
+ lgf %r9,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # branch to interrupt-handler
+ BASR_R14_R9 # branch to interrupt-handler
.Lpgm_return:
LOCKDEP_SYS_EXIT
tm __PT_PSW+1(%r11),0x01 # returning to user ?
@@ -620,12 +778,23 @@ ENTRY(pgm_check_handler)
ENTRY(io_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_IO_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
@@ -660,9 +829,13 @@ ENTRY(io_int_handler)
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+ jno .Lio_exit_kernel
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+.Lio_exit_kernel:
lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_PSW
.Lio_done:
@@ -833,12 +1006,23 @@ ENTRY(io_int_handler)
ENTRY(ext_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_EXT_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
lghi %r1,__LC_EXT_PARAMS2
@@ -871,11 +1055,12 @@ ENTRY(psw_idle)
.Lpsw_idle_stcctm:
#endif
oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
+ BPON
STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
lpswe __SF_EMPTY(%r15)
- br %r14
+ BR_R1USE_R14
.Lpsw_idle_end:
/*
@@ -889,7 +1074,7 @@ ENTRY(save_fpu_regs)
lg %r2,__LC_CURRENT
aghi %r2,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bor %r14
+ jo .Lsave_fpu_regs_exit
stfpc __THREAD_FPU_fpc(%r2)
lg %r3,__THREAD_FPU_regs(%r2)
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
@@ -916,7 +1101,8 @@ ENTRY(save_fpu_regs)
std 15,120(%r3)
.Lsave_fpu_regs_done:
oi __LC_CPU_FLAGS+7,_CIF_FPU
- br %r14
+.Lsave_fpu_regs_exit:
+ BR_R1USE_R14
.Lsave_fpu_regs_end:
EXPORT_SYMBOL(save_fpu_regs)
@@ -934,7 +1120,7 @@ load_fpu_regs:
lg %r4,__LC_CURRENT
aghi %r4,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bnor %r14
+ jno .Lload_fpu_regs_exit
lfpc __THREAD_FPU_fpc(%r4)
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
@@ -961,7 +1147,8 @@ load_fpu_regs:
ld 15,120(%r4)
.Lload_fpu_regs_done:
ni __LC_CPU_FLAGS+7,255-_CIF_FPU
- br %r14
+.Lload_fpu_regs_exit:
+ BR_R1USE_R14
.Lload_fpu_regs_end:
.L__critical_end:
@@ -971,6 +1158,7 @@ load_fpu_regs:
*/
ENTRY(mcck_int_handler)
STCK __LC_MCCK_CLOCK
+ BPOFF
la %r1,4095 # validate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer
sckc __LC_CLOCK_COMPARATOR # validate comparator
@@ -1046,6 +1234,16 @@ ENTRY(mcck_int_handler)
.Lmcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
@@ -1071,6 +1269,7 @@ ENTRY(mcck_int_handler)
mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0: lmg %r11,%r15,__PT_R11(%r11)
@@ -1166,7 +1365,7 @@ cleanup_critical:
jl 0f
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
-0: br %r14
+0: BR_R11USE_R14
.align 8
.Lcleanup_table:
@@ -1197,11 +1396,12 @@ cleanup_critical:
clg %r9,BASED(.Lsie_crit_mcck_length)
jh 1f
oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
-1: lg %r9,__SF_EMPTY(%r15) # get control block pointer
+1: BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ lg %r9,__SF_EMPTY(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
- br %r14
+ BR_R11USE_R14
#endif
.Lcleanup_system_call:
@@ -1254,7 +1454,7 @@ cleanup_critical:
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
larl %r9,.Lsysc_do_svc
- br %r14
+ BR_R11USE_R14
.Lcleanup_system_call_insn:
.quad system_call
.quad .Lsysc_stmg
@@ -1266,7 +1466,7 @@ cleanup_critical:
.Lcleanup_sysc_tif:
larl %r9,.Lsysc_tif
- br %r14
+ BR_R11USE_R14
.Lcleanup_sysc_restore:
# check if stpt has been executed
@@ -1283,14 +1483,14 @@ cleanup_critical:
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
+ BR_R11USE_R14
.Lcleanup_sysc_restore_insn:
.quad .Lsysc_exit_timer
.quad .Lsysc_done - 4
.Lcleanup_io_tif:
larl %r9,.Lio_tif
- br %r14
+ BR_R11USE_R14
.Lcleanup_io_restore:
# check if stpt has been executed
@@ -1304,7 +1504,7 @@ cleanup_critical:
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
+ BR_R11USE_R14
.Lcleanup_io_restore_insn:
.quad .Lio_exit_timer
.quad .Lio_done - 4
@@ -1357,17 +1557,17 @@ cleanup_critical:
# prepare return psw
nihh %r8,0xfcfd # clear irq & wait state bits
lg %r9,48(%r11) # return from psw_idle
- br %r14
+ BR_R11USE_R14
.Lcleanup_idle_insn:
.quad .Lpsw_idle_lpsw
.Lcleanup_save_fpu_regs:
larl %r9,save_fpu_regs
- br %r14
+ BR_R11USE_R14
.Lcleanup_load_fpu_regs:
larl %r9,load_fpu_regs
- br %r14
+ BR_R11USE_R14
/*
* Integer constants
@@ -1387,7 +1587,6 @@ cleanup_critical:
.Lsie_crit_mcck_length:
.quad .Lsie_skip - .Lsie_entry
#endif
-
.section .rodata, "a"
#define SYSCALL(esame,emu) .long esame
.globl sys_call_table
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index da5cc3b469aa..34477c1aee6d 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -543,6 +543,7 @@ static struct kset *ipl_kset;
static void __ipl_run(void *unused)
{
+ __bpon();
diag308(DIAG308_LOAD_CLEAR, NULL);
if (MACHINE_IS_VM)
__cpcmd("IPL", NULL, 0, NULL);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 943d13e90c98..60f60afa645c 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -281,7 +281,7 @@ static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
* is a BUG. The code path resides in the .kprobes.text
* section and is executed with interrupts disabled.
*/
- printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
+ pr_err("Invalid kprobe detected.\n");
dump_kprobe(p);
BUG();
}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index b7abfad4fd7d..1fc6d1ff92d3 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -19,6 +19,8 @@
#include <linux/moduleloader.h>
#include <linux/bug.h>
#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
+#include <asm/facility.h>
#if 0
#define DEBUGP printk
@@ -156,7 +158,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
me->arch.got_offset = me->core_layout.size;
me->core_layout.size += me->arch.got_size;
me->arch.plt_offset = me->core_layout.size;
- me->core_layout.size += me->arch.plt_size;
+ if (me->arch.plt_size) {
+ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_call_disable)
+ me->arch.plt_size += PLT_ENTRY_SIZE;
+ me->core_layout.size += me->arch.plt_size;
+ }
return 0;
}
@@ -310,9 +316,21 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
unsigned int *ip;
ip = me->core_layout.base + me->arch.plt_offset +
info->plt_offset;
- ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
- ip[1] = 0x100a0004;
- ip[2] = 0x07f10000;
+ ip[0] = 0x0d10e310; /* basr 1,0 */
+ ip[1] = 0x100a0004; /* lg 1,10(1) */
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ !nospec_call_disable) {
+ unsigned int *ij;
+ ij = me->core_layout.base +
+ me->arch.plt_offset +
+ me->arch.plt_size - PLT_ENTRY_SIZE;
+ ip[2] = 0xa7f40000 + /* j __jump_r1 */
+ (unsigned int)(u16)
+ (((unsigned long) ij - 8 -
+ (unsigned long) ip) / 2);
+ } else {
+ ip[2] = 0x07f10000; /* br %r1 */
+ }
ip[3] = (unsigned int) (val >> 32);
ip[4] = (unsigned int) val;
info->plt_initialized = 1;
@@ -418,16 +436,42 @@ int module_finalize(const Elf_Ehdr *hdr,
struct module *me)
{
const Elf_Shdr *s;
- char *secstrings;
+ char *secstrings, *secname;
+ void *aseg;
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ !nospec_call_disable && me->arch.plt_size) {
+ unsigned int *ij;
+
+ ij = me->core_layout.base + me->arch.plt_offset +
+ me->arch.plt_size - PLT_ENTRY_SIZE;
+ if (test_facility(35)) {
+ ij[0] = 0xc6000000; /* exrl %r0,.+10 */
+ ij[1] = 0x0005a7f4; /* j . */
+ ij[2] = 0x000007f1; /* br %r1 */
+ } else {
+ ij[0] = 0x44000000 | (unsigned int)
+ offsetof(struct lowcore, br_r1_trampoline);
+ ij[1] = 0xa7f40000; /* j . */
+ }
+ }
secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
- if (!strcmp(".altinstructions", secstrings + s->sh_name)) {
- /* patch .altinstructions */
- void *aseg = (void *)s->sh_addr;
+ aseg = (void *) s->sh_addr;
+ secname = secstrings + s->sh_name;
+ if (!strcmp(".altinstructions", secname))
+ /* patch .altinstructions */
apply_alternatives(aseg, aseg + s->sh_size);
- }
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ (!strcmp(".nospec_call_table", secname)))
+ nospec_call_revert(aseg, aseg + s->sh_size);
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ (!strcmp(".nospec_return_table", secname)))
+ nospec_return_revert(aseg, aseg + s->sh_size);
}
jump_label_apply_nops(me);
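
With expolines enabled, every module PLT entry now ends in a `j __jump_r1` instead of a direct `br %r1`, and one extra PLT-sized slot appended to the PLT area holds the shared trampoline that module_finalize() fills in: `exrl %r0,.+10; j .; br %r1` when the execute-extensions facility (35) is available, otherwise an EX of the `br %r1` halfword kept in absolute lowcore. A user-space sketch of that EX encoding follows; the struct offset is illustrative only, not the real lowcore layout:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative lowcore stand-in; the real br_r1_trampoline offset
     * differs, only the encoding scheme matters here. */
    struct lowcore_sketch {
            char     pad[0xb0];
            uint16_t br_r1_trampoline;      /* holds 0x07f1 = "br %r1" */
    };

    /* EX opcode 0x44 with base and index zero: the 12-bit displacement
     * addresses absolute lowcore, so the resulting word executes the
     * "br %r1" stored there without needing a scratch register. */
    static uint32_t make_ex_br_r1(void)
    {
            return 0x44000000u |
                   (uint32_t)offsetof(struct lowcore_sketch, br_r1_trampoline);
    }
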
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
new file mode 100644
index 000000000000..69d7fcf48158
--- /dev/null
+++ b/arch/s390/kernel/nospec-branch.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <asm/nospec-branch.h>
+
+int nospec_call_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+int nospec_return_disable = !IS_ENABLED(CONFIG_EXPOLINE_FULL);
+
+static int __init nospectre_v2_setup_early(char *str)
+{
+ nospec_call_disable = 1;
+ nospec_return_disable = 1;
+ return 0;
+}
+early_param("nospectre_v2", nospectre_v2_setup_early);
+
+static int __init spectre_v2_setup_early(char *str)
+{
+ if (str && !strncmp(str, "on", 2)) {
+ nospec_call_disable = 0;
+ nospec_return_disable = 0;
+ }
+ if (str && !strncmp(str, "off", 3)) {
+ nospec_call_disable = 1;
+ nospec_return_disable = 1;
+ }
+ if (str && !strncmp(str, "auto", 4)) {
+ nospec_call_disable = 0;
+ nospec_return_disable = 1;
+ }
+ return 0;
+}
+early_param("spectre_v2", spectre_v2_setup_early);
+
+static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+{
+ enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
+ u8 *instr, *thunk, *br;
+ u8 insnbuf[6];
+ s32 *epo;
+
+ /* Second part of the instruction replace is always a nop */
+ memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
+ for (epo = start; epo < end; epo++) {
+ instr = (u8 *) epo + *epo;
+ if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
+ type = BRCL_EXPOLINE; /* brcl instruction */
+ else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
+ type = BRASL_EXPOLINE; /* brasl instruction */
+ else
+ continue;
+ thunk = instr + (*(int *)(instr + 2)) * 2;
+ if (thunk[0] == 0xc6 && thunk[1] == 0x00)
+ /* exrl %r0,<target-br> */
+ br = thunk + (*(int *)(thunk + 2)) * 2;
+ else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
+ thunk[6] == 0x44 && thunk[7] == 0x00 &&
+ (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
+ (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
+ /* larl %rx,<target br> + ex %r0,0(%rx) */
+ br = thunk + (*(int *)(thunk + 2)) * 2;
+ else
+ continue;
+ if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
+ continue;
+ switch (type) {
+ case BRCL_EXPOLINE:
+ /* brcl to thunk, replace with br + nop */
+ insnbuf[0] = br[0];
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+ break;
+ case BRASL_EXPOLINE:
+ /* brasl to thunk, replace with basr + nop */
+ insnbuf[0] = 0x0d;
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+ break;
+ }
+
+ s390_kernel_write(instr, insnbuf, 6);
+ }
+}
+
+void __init_or_module nospec_call_revert(s32 *start, s32 *end)
+{
+ if (nospec_call_disable)
+ __nospec_revert(start, end);
+}
+
+void __init_or_module nospec_return_revert(s32 *start, s32 *end)
+{
+ if (nospec_return_disable)
+ __nospec_revert(start, end);
+}
+
+extern s32 __nospec_call_start[], __nospec_call_end[];
+extern s32 __nospec_return_start[], __nospec_return_end[];
+void __init nospec_init_branches(void)
+{
+ nospec_call_revert(__nospec_call_start, __nospec_call_end);
+ nospec_return_revert(__nospec_return_start, __nospec_return_end);
+}
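
The two early parameters reduce to three policies. A small stand-alone model of the resulting flag combinations (local names, not kernel API; unknown values fall back to "off" here for simplicity, whereas the kernel keeps its compile-time default):

    #include <stdbool.h>
    #include <string.h>

    struct nospec_policy {
            bool call_disable;      /* revert expolines at call sites */
            bool return_disable;    /* revert expolines at return sites */
    };

    /* Mirrors spectre_v2_setup_early(): "on" keeps expolines for calls
     * and returns, "auto" only for calls, "off" (and nospectre_v2)
     * reverts both at boot. */
    static struct nospec_policy policy_for(const char *arg)
    {
            struct nospec_policy p = { true, true };        /* "off" */

            if (!strncmp(arg, "on", 2))
                    p.call_disable = p.return_disable = false;
            else if (!strncmp(arg, "auto", 4))
                    p.call_disable = false;
            return p;
    }
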
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index 94f90cefbffc..c5bc3f209652 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -226,7 +226,7 @@ CPUMF_EVENT_ATTR(cf_z14, L1I_OFFDRAWER_L4_SOURCED_WRITES, 0x00af);
CPUMF_EVENT_ATTR(cf_z14, BCD_DFP_EXECUTION_SLOTS, 0x00e0);
CPUMF_EVENT_ATTR(cf_z14, VX_BCD_EXECUTION_SLOTS, 0x00e1);
CPUMF_EVENT_ATTR(cf_z14, DECIMAL_INSTRUCTIONS, 0x00e2);
-CPUMF_EVENT_ATTR(cf_z14, LAST_HOST_TRANSLATIONS, 0x00e9);
+CPUMF_EVENT_ATTR(cf_z14, LAST_HOST_TRANSLATIONS, 0x00e8);
CPUMF_EVENT_ATTR(cf_z14, TX_NC_TABORT, 0x00f3);
CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_NO_SPECIAL, 0x00f4);
CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 5362fd868d0d..6fe2e1875058 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -197,3 +197,21 @@ const struct seq_operations cpuinfo_op = {
.stop = c_stop,
.show = show_cpuinfo,
};
+
+int s390_isolate_bp(void)
+{
+ if (!test_facility(82))
+ return -EOPNOTSUPP;
+ set_thread_flag(TIF_ISOLATE_BP);
+ return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp);
+
+int s390_isolate_bp_guest(void)
+{
+ if (!test_facility(82))
+ return -EOPNOTSUPP;
+ set_thread_flag(TIF_ISOLATE_BP_GUEST);
+ return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp_guest);
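
A hedged usage sketch for the two new interfaces; the caller shown is hypothetical. Facility 82 is the branch-prediction-control facility tested above, and the TIF flags take effect on the kernel entry/exit paths patched in entry.S (BPENTER/BPEXIT):

    /* Hypothetical kernel caller: opt the current task into isolated
     * branch prediction, degrading gracefully on older machines. */
    static void try_enable_bp_isolation(void)
    {
            if (s390_isolate_bp() == -EOPNOTSUPP)
                    pr_info("branch prediction isolation unavailable\n");
    }
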
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index 09f5bf0d5c0c..125c7f6e8715 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -18,6 +18,8 @@
#include <asm/cpu_mf.h>
#include <asm/irq.h>
+#include "entry.h"
+
/* empty control block to disable RI by loading it */
struct runtime_instr_cb runtime_instr_empty_cb;
@@ -59,7 +61,13 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
cb->v = 1;
}
-SYSCALL_DEFINE1(s390_runtime_instr, int, command)
+/*
+ * The signum argument is unused. In older kernels it was used to
+ * specify a real-time signal. For backwards compatibility user space
+ * should pass a valid real-time signal number (the signum argument
+ * was checked in older kernels).
+ */
+SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
{
struct runtime_instr_cb *cb;
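
The widened syscall keeps old binaries working. A user-space sketch, with constants hedged: S390_RUNTIME_INSTR_START is 0x1 in the uapi header as far as I recall, and SIGRTMIN merely stands in for the real-time signal older kernels validated:

    #include <signal.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define S390_RUNTIME_INSTR_START 0x1    /* per asm/runtime_instr.h */

    /* Start runtime instrumentation; the second argument is ignored by
     * this kernel but must be a valid RT signal for older ones. */
    static long runtime_instr_start(void)
    {
            return syscall(__NR_s390_runtime_instr,
                           S390_RUNTIME_INSTR_START, SIGRTMIN);
    }
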
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 793da97f9a6e..a6a91f01a17a 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -68,6 +68,7 @@
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
#include "entry.h"
/*
@@ -340,7 +341,9 @@ static void __init setup_lowcore(void)
lc->preempt_count = S390_lowcore.preempt_count;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- MAX_FACILITY_BIT/8);
+ sizeof(lc->stfle_fac_list));
+ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+ sizeof(lc->alt_stfle_fac_list));
nmi_alloc_boot_cpu(lc);
vdso_alloc_boot_cpu(lc);
lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
@@ -377,6 +380,7 @@ static void __init setup_lowcore(void)
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
#endif
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;
@@ -952,6 +956,8 @@ void __init setup_arch(char **cmdline_p)
set_preferred_console();
apply_alternative_instructions();
+ if (IS_ENABLED(CONFIG_EXPOLINE))
+ nospec_init_branches();
/* Setup zfcpdump support */
setup_zfcpdump();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a919b2f0141d..a4a9fe1934e9 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -214,6 +214,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
if (nmi_alloc_per_cpu(lc))
goto out;
if (vdso_alloc_per_cpu(lc))
@@ -266,7 +267,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
__ctl_store(lc->cregs_save_area, 0, 15);
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- MAX_FACILITY_BIT/8);
+ sizeof(lc->stfle_fac_list));
+ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+ sizeof(lc->alt_stfle_fac_list));
arch_spin_lock_setup(cpu);
}
@@ -317,6 +320,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
mem_assign_absolute(lc->restart_fn, (unsigned long) func);
mem_assign_absolute(lc->restart_data, (unsigned long) data);
mem_assign_absolute(lc->restart_source, source_cpu);
+ __bpon();
asm volatile(
"0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
@@ -901,6 +905,7 @@ void __cpu_die(unsigned int cpu)
void __noreturn cpu_die(void)
{
idle_task_exit();
+ __bpon();
pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
for (;;) ;
}
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index a441cba8d165..fc7e04c2195b 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -89,6 +89,8 @@ static void stsi_1_1_1(struct seq_file *m, struct sysinfo_1_1_1 *info)
EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
seq_printf(m, "Manufacturer: %-16.16s\n", info->manufacturer);
seq_printf(m, "Type: %-4.4s\n", info->type);
+ if (info->lic)
+ seq_printf(m, "LIC Identifier: %016lx\n", info->lic);
/*
* Sigh: the model field has been renamed with System z9
* to model_capacity and a new model field has been added
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 608cf2987d19..08d12cfaf091 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -123,6 +123,20 @@ SECTIONS
*(.altinstr_replacement)
}
+ /*
+ * Table with the patch locations to undo expolines
+ */
+ .nospec_call_table : {
+ __nospec_call_start = . ;
+ *(.s390_indirect*)
+ __nospec_call_end = . ;
+ }
+ .nospec_return_table : {
+ __nospec_return_start = . ;
+ *(.s390_return*)
+ __nospec_return_end = . ;
+ }
+
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
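
Both tables collect 32-bit self-relative offsets emitted at every expoline branch site; keeping each entry relative to its own address avoids 64-bit relocations and halves the table size versus plain pointers. A minimal sketch of the walk performed by __nospec_revert() above:

    #include <stdint.h>

    /* Each s32 entry holds the distance from the entry itself to the
     * patched branch instruction. */
    static uint8_t *entry_to_instr(int32_t *epo)
    {
            return (uint8_t *)epo + *epo;
    }
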
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 9a4594e0a1ff..a3dbd459cce9 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -23,6 +23,7 @@ config KVM
select PREEMPT_NOTIFIERS
select ANON_INODES
select HAVE_KVM_CPU_RELAX_INTERCEPT
+ select HAVE_KVM_VCPU_ASYNC_IOCTL
select HAVE_KVM_EVENTFD
select KVM_ASYNC_PF
select KVM_ASYNC_PF_SYNC
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 89aa114a2cba..45634b3d2e0a 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -257,6 +257,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
case 0x500:
return __diag_virtio_hypercall(vcpu);
default:
+ vcpu->stat.diagnose_other++;
return -EOPNOTSUPP;
}
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 024ad8bcc516..aabf46f5f883 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -36,7 +36,7 @@ static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
int c, scn;
- if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
+ if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
return 0;
BUG_ON(!kvm_s390_use_sca_entries());
@@ -101,18 +101,17 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
/* another external call is pending */
return -EBUSY;
}
- atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
return 0;
}
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
- struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc, expect;
if (!kvm_s390_use_sca_entries())
return;
- atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
+ kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
read_lock(&vcpu->kvm->arch.sca_lock);
if (vcpu->kvm->arch.use_esca) {
struct esca_block *sca = vcpu->kvm->arch.sca;
@@ -190,8 +189,8 @@ static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
static inline int is_ioirq(unsigned long irq_type)
{
- return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
- (irq_type <= IRQ_PEND_IO_ISC_7));
+ return ((irq_type >= IRQ_PEND_IO_ISC_7) &&
+ (irq_type <= IRQ_PEND_IO_ISC_0));
}
static uint64_t isc_to_isc_bits(int isc)
@@ -199,25 +198,59 @@ static uint64_t isc_to_isc_bits(int isc)
return (0x80 >> isc) << 24;
}
+static inline u32 isc_to_int_word(u8 isc)
+{
+ return ((u32)isc << 27) | 0x80000000;
+}
+
static inline u8 int_word_to_isc(u32 int_word)
{
return (int_word & 0x38000000) >> 27;
}
+/*
+ * To use atomic bitmap functions, we have to provide a bitmap address
+ * that is u64 aligned. However, the ipm might be u32 aligned.
+ * Therefore, we logically start the bitmap at the very beginning of the
+ * struct and fixup the bit number.
+ */
+#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)
+
+static inline void kvm_s390_gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+{
+ set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
+}
+
+static inline u8 kvm_s390_gisa_get_ipm(struct kvm_s390_gisa *gisa)
+{
+ return READ_ONCE(gisa->ipm);
+}
+
+static inline void kvm_s390_gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+{
+ clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
+}
+
+static inline int kvm_s390_gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
+{
+ return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
+}
+
static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->arch.float_int.pending_irqs |
- vcpu->arch.local_int.pending_irqs;
+ vcpu->arch.local_int.pending_irqs |
+ kvm_s390_gisa_get_ipm(vcpu->kvm->arch.gisa) << IRQ_PEND_IO_ISC_7;
}
static inline int isc_to_irq_type(unsigned long isc)
{
- return IRQ_PEND_IO_ISC_0 + isc;
+ return IRQ_PEND_IO_ISC_0 - isc;
}
static inline int irq_type_to_isc(unsigned long irq_type)
{
- return irq_type - IRQ_PEND_IO_ISC_0;
+ return IRQ_PEND_IO_ISC_0 - irq_type;
}
static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
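
isc_to_int_word() and int_word_to_isc() round-trip the interruption subclass through the three ISC bits of an adapter interruption word, with the top bit marking it as adapter I/O. A stand-alone check of the encoding used above:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t isc_to_int_word(uint8_t isc)
    {
            return ((uint32_t)isc << 27) | 0x80000000u;
    }

    static uint8_t int_word_to_isc(uint32_t int_word)
    {
            return (int_word & 0x38000000u) >> 27;
    }

    int main(void)
    {
            for (uint8_t isc = 0; isc <= 7; isc++)
                    assert(int_word_to_isc(isc_to_int_word(isc)) == isc);
            assert(isc_to_int_word(5) == 0xa8000000u);
            return 0;
    }
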
@@ -278,20 +311,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
- atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
- set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
+ set_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
- atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
- clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+ kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
+ clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
- atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
- &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
+ CPUSTAT_STOP_INT);
vcpu->arch.sie_block->lctl = 0x0000;
vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
@@ -302,17 +335,12 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
}
}
-static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
-{
- atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
-}
-
static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
return;
else if (psw_ioint_disabled(vcpu))
- __set_cpuflag(vcpu, CPUSTAT_IO_INT);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
else
vcpu->arch.sie_block->lctl |= LCTL_CR6;
}
@@ -322,7 +350,7 @@ static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
return;
if (psw_extint_disabled(vcpu))
- __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
else
vcpu->arch.sie_block->lctl |= LCTL_CR0;
}
@@ -340,7 +368,7 @@ static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
if (kvm_s390_is_stop_irq_pending(vcpu))
- __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}
/* Set interception request for non-deliverable interrupts */
@@ -897,18 +925,38 @@ static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
return rc ? -EFAULT : 0;
}
+static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
+{
+ int rc;
+
+ rc = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
+ rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
+ rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
+ rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
+ rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
+ &vcpu->arch.sie_block->gpsw,
+ sizeof(psw_t));
+ return rc ? -EFAULT : 0;
+}
+
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
unsigned long irq_type)
{
struct list_head *isc_list;
struct kvm_s390_float_interrupt *fi;
struct kvm_s390_interrupt_info *inti = NULL;
+ struct kvm_s390_io_info io;
+ u32 isc;
int rc = 0;
fi = &vcpu->kvm->arch.float_int;
spin_lock(&fi->lock);
- isc_list = &fi->lists[irq_type_to_isc(irq_type)];
+ isc = irq_type_to_isc(irq_type);
+ isc_list = &fi->lists[isc];
inti = list_first_entry_or_null(isc_list,
struct kvm_s390_interrupt_info,
list);
@@ -936,24 +984,31 @@ static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
spin_unlock(&fi->lock);
if (inti) {
- rc = put_guest_lc(vcpu, inti->io.subchannel_id,
- (u16 *)__LC_SUBCHANNEL_ID);
- rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
- (u16 *)__LC_SUBCHANNEL_NR);
- rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
- (u32 *)__LC_IO_INT_PARM);
- rc |= put_guest_lc(vcpu, inti->io.io_int_word,
- (u32 *)__LC_IO_INT_WORD);
- rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
- rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
- &vcpu->arch.sie_block->gpsw,
- sizeof(psw_t));
+ rc = __do_deliver_io(vcpu, &(inti->io));
kfree(inti);
+ goto out;
}
- return rc ? -EFAULT : 0;
+ if (vcpu->kvm->arch.gisa &&
+ kvm_s390_gisa_tac_ipm_gisc(vcpu->kvm->arch.gisa, isc)) {
+ /*
+ * in case an adapter interrupt was not delivered
+ * in SIE context KVM will handle the delivery
+ */
+ VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
+ memset(&io, 0, sizeof(io));
+ io.io_int_word = isc_to_int_word(isc);
+ vcpu->stat.deliver_io_int++;
+ trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
+ KVM_S390_INT_IO(1, 0, 0, 0),
+ ((__u32)io.subchannel_id << 16) |
+ io.subchannel_nr,
+ ((__u64)io.io_int_parm << 32) |
+ io.io_int_word);
+ rc = __do_deliver_io(vcpu, &io);
+ }
+out:
+ return rc;
}
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);
@@ -1155,8 +1210,8 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
while ((irqs = deliverable_irqs(vcpu)) && !rc) {
- /* bits are in the order of interrupt priority */
- irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
+ /* bits are in the reverse order of interrupt priority */
+ irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
if (is_ioirq(irq_type)) {
rc = __deliver_io(vcpu, irq_type);
} else {
@@ -1228,7 +1283,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
li->irq.ext = irq->u.ext;
set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
- atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
return 0;
}
@@ -1253,7 +1308,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
return -EBUSY;
*extcall = irq->u.extcall;
- atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
return 0;
}
@@ -1297,7 +1352,7 @@ static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
return -EBUSY;
stop->flags = irq->u.stop.flags;
- __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
return 0;
}
@@ -1329,7 +1384,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
- atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
return 0;
}
@@ -1373,7 +1428,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
0, 0);
set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
- atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
return 0;
}
@@ -1386,7 +1441,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
0, 0);
set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
- atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
return 0;
}
@@ -1416,20 +1471,86 @@ static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
return NULL;
}
+static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
+ u64 isc_mask, u32 schid)
+{
+ struct kvm_s390_interrupt_info *inti = NULL;
+ int isc;
+
+ for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
+ if (isc_mask & isc_to_isc_bits(isc))
+ inti = get_io_int(kvm, isc, schid);
+ }
+ return inti;
+}
+
+static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
+{
+ unsigned long active_mask;
+ int isc;
+
+ if (schid)
+ goto out;
+ if (!kvm->arch.gisa)
+ goto out;
+
+ active_mask = (isc_mask & kvm_s390_gisa_get_ipm(kvm->arch.gisa) << 24) << 32;
+ while (active_mask) {
+ isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
+ if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, isc))
+ return isc;
+ clear_bit_inv(isc, &active_mask);
+ }
+out:
+ return -EINVAL;
+}
+
/*
* Dequeue and return an I/O interrupt matching any of the interruption
* subclasses as designated by the isc mask in cr6 and the schid (if != 0).
+ * Take into account the interrupts pending in the interrupt list and in GISA.
+ *
+ * Note that for a guest that does not enable I/O interrupts
+ * but relies on TPI, a flood of classic interrupts may starve
+ * out adapter interrupts on the same isc. Linux does not do
+ * that, and it is possible to work around the issue by configuring
+ * different iscs for classic and adapter interrupts in the guest,
+ * but we may want to revisit this in the future.
*/
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 isc_mask, u32 schid)
{
- struct kvm_s390_interrupt_info *inti = NULL;
+ struct kvm_s390_interrupt_info *inti, *tmp_inti;
int isc;
- for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
- if (isc_mask & isc_to_isc_bits(isc))
- inti = get_io_int(kvm, isc, schid);
+ inti = get_top_io_int(kvm, isc_mask, schid);
+
+ isc = get_top_gisa_isc(kvm, isc_mask, schid);
+ if (isc < 0)
+ /* no AI in GISA */
+ goto out;
+
+ if (!inti)
+ /* AI in GISA but no classical IO int */
+ goto gisa_out;
+
+ /* both types of interrupts present */
+ if (int_word_to_isc(inti->io.io_int_word) <= isc) {
+ /* classical IO int with higher priority */
+ kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+ goto out;
}
+gisa_out:
+ tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+ if (tmp_inti) {
+ tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
+ tmp_inti->io.io_int_word = isc_to_int_word(isc);
+ if (inti)
+ kvm_s390_reinject_io_int(kvm, inti);
+ inti = tmp_inti;
+ } else
+ kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+out:
return inti;
}
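
The merged dequeue preserves the architectural ordering: a lower ISC means higher delivery priority, and on a tie the classical, list-queued interrupt is taken first while the adapter bit is put back into the IPM. A sketch of that decision (names local, not kernel API):

    #include <stdbool.h>
    #include <stdint.h>

    /* true when the classical interrupt must be delivered before the
     * GISA adapter interrupt, matching the "<=" comparison above. */
    static bool classical_wins(uint8_t classical_isc, uint8_t gisa_isc)
    {
            return classical_isc <= gisa_isc;
    }
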
@@ -1517,6 +1638,15 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
struct list_head *list;
int isc;
+ isc = int_word_to_isc(inti->io.io_int_word);
+
+ if (kvm->arch.gisa && inti->type & KVM_S390_INT_IO_AI_MASK) {
+ VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
+ kvm_s390_gisa_set_ipm_gisc(kvm->arch.gisa, isc);
+ kfree(inti);
+ return 0;
+ }
+
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
@@ -1532,7 +1662,6 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
inti->io.subchannel_id >> 8,
inti->io.subchannel_id >> 1 & 0x3,
inti->io.subchannel_nr);
- isc = int_word_to_isc(inti->io.io_int_word);
list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
list_add_tail(&inti->list, list);
set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
@@ -1546,7 +1675,6 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
- struct kvm_s390_local_interrupt *li;
struct kvm_vcpu *dst_vcpu;
int sigcpu, online_vcpus, nr_tries = 0;
@@ -1568,20 +1696,17 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
/* make the VCPU drop out of the SIE, or wake it up if sleeping */
- li = &dst_vcpu->arch.local_int;
- spin_lock(&li->lock);
switch (type) {
case KVM_S390_MCHK:
- atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
+ kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- atomic_or(CPUSTAT_IO_INT, li->cpuflags);
+ kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
break;
default:
- atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
+ kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
break;
}
- spin_unlock(&li->lock);
kvm_s390_vcpu_wakeup(dst_vcpu);
}
@@ -1820,6 +1945,7 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
for (i = 0; i < FIRQ_MAX_COUNT; i++)
fi->counters[i] = 0;
spin_unlock(&fi->lock);
+ kvm_s390_gisa_clear(kvm);
};
static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
@@ -1847,6 +1973,22 @@ static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
max_irqs = len / sizeof(struct kvm_s390_irq);
+ if (kvm->arch.gisa &&
+ kvm_s390_gisa_get_ipm(kvm->arch.gisa)) {
+ for (i = 0; i <= MAX_ISC; i++) {
+ if (n == max_irqs) {
+ /* signal userspace to try again */
+ ret = -ENOMEM;
+ goto out_nolock;
+ }
+ if (kvm_s390_gisa_tac_ipm_gisc(kvm->arch.gisa, i)) {
+ irq = (struct kvm_s390_irq *) &buf[n];
+ irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
+ irq->u.io.io_int_word = isc_to_int_word(i);
+ n++;
+ }
+ }
+ }
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
for (i = 0; i < FIRQ_LIST_COUNT; i++) {
@@ -1885,6 +2027,7 @@ static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
out:
spin_unlock(&fi->lock);
+out_nolock:
if (!ret && n > 0) {
if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
ret = -EFAULT;
@@ -2245,7 +2388,7 @@ static int kvm_s390_inject_airq(struct kvm *kvm,
struct kvm_s390_interrupt s390int = {
.type = KVM_S390_INT_IO(1, 0, 0, 0),
.parm = 0,
- .parm64 = (adapter->isc << 27) | 0x80000000,
+ .parm64 = isc_to_int_word(adapter->isc),
};
int ret = 0;
@@ -2687,3 +2830,28 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
return n;
}
+
+void kvm_s390_gisa_clear(struct kvm *kvm)
+{
+ if (kvm->arch.gisa) {
+ memset(kvm->arch.gisa, 0, sizeof(struct kvm_s390_gisa));
+ kvm->arch.gisa->next_alert = (u32)(u64)kvm->arch.gisa;
+ VM_EVENT(kvm, 3, "gisa 0x%pK cleared", kvm->arch.gisa);
+ }
+}
+
+void kvm_s390_gisa_init(struct kvm *kvm)
+{
+ if (css_general_characteristics.aiv) {
+ kvm->arch.gisa = &kvm->arch.sie_page2->gisa;
+ VM_EVENT(kvm, 3, "gisa 0x%pK initialized", kvm->arch.gisa);
+ kvm_s390_gisa_clear(kvm);
+ }
+}
+
+void kvm_s390_gisa_destroy(struct kvm *kvm)
+{
+ if (!kvm->arch.gisa)
+ return;
+ kvm->arch.gisa = NULL;
+}
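
The (u32)(u64) casts are safe because sie_page2 is allocated with GFP_DMA, which on s390 guarantees memory below 2 GB, so the GISA origin fits the 32-bit fields; a next_alert that refers back to the GISA itself presumably marks the "not on any alert list" idle state that clearing restores. A tiny sketch of the address assumption:

    /* The origin round-trips through u32 only because the backing page
     * is guaranteed to lie below 2 GB (GFP_DMA on s390). */
    static int gisa_origin_fits(unsigned long addr)
    {
            return addr < (1UL << 31);
    }
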
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 1371dff2b90d..ba4c7092335a 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2,7 +2,7 @@
/*
* hosting IBM Z kernel virtual machines (s390x)
*
- * Copyright IBM Corp. 2008, 2017
+ * Copyright IBM Corp. 2008, 2018
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@@ -87,19 +87,31 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
+ { "instruction_epsw", VCPU_STAT(instruction_epsw) },
+ { "instruction_gs", VCPU_STAT(instruction_gs) },
+ { "instruction_io_other", VCPU_STAT(instruction_io_other) },
+ { "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
+ { "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
+ { "instruction_ptff", VCPU_STAT(instruction_ptff) },
{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
+ { "instruction_sck", VCPU_STAT(instruction_sck) },
+ { "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
{ "instruction_spx", VCPU_STAT(instruction_spx) },
{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
{ "instruction_stap", VCPU_STAT(instruction_stap) },
- { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
+ { "instruction_iske", VCPU_STAT(instruction_iske) },
+ { "instruction_ri", VCPU_STAT(instruction_ri) },
+ { "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
+ { "instruction_sske", VCPU_STAT(instruction_sske) },
{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
- { "instruction_stsch", VCPU_STAT(instruction_stsch) },
- { "instruction_chsc", VCPU_STAT(instruction_chsc) },
{ "instruction_essa", VCPU_STAT(instruction_essa) },
{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
+ { "instruction_tb", VCPU_STAT(instruction_tb) },
+ { "instruction_tpi", VCPU_STAT(instruction_tpi) },
{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
+ { "instruction_tsch", VCPU_STAT(instruction_tsch) },
{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
{ "instruction_sie", VCPU_STAT(instruction_sie) },
{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
@@ -118,12 +130,13 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
- { "diagnose_10", VCPU_STAT(diagnose_10) },
- { "diagnose_44", VCPU_STAT(diagnose_44) },
- { "diagnose_9c", VCPU_STAT(diagnose_9c) },
- { "diagnose_258", VCPU_STAT(diagnose_258) },
- { "diagnose_308", VCPU_STAT(diagnose_308) },
- { "diagnose_500", VCPU_STAT(diagnose_500) },
+ { "instruction_diag_10", VCPU_STAT(diagnose_10) },
+ { "instruction_diag_44", VCPU_STAT(diagnose_44) },
+ { "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
+ { "instruction_diag_258", VCPU_STAT(diagnose_258) },
+ { "instruction_diag_308", VCPU_STAT(diagnose_308) },
+ { "instruction_diag_500", VCPU_STAT(diagnose_500) },
+ { "instruction_diag_other", VCPU_STAT(diagnose_other) },
{ NULL }
};
@@ -576,7 +589,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
case KVM_CAP_S390_GS:
r = -EINVAL;
mutex_lock(&kvm->lock);
- if (atomic_read(&kvm->online_vcpus)) {
+ if (kvm->created_vcpus) {
r = -EBUSY;
} else if (test_facility(133)) {
set_kvm_facility(kvm->arch.model.fac_mask, 133);
@@ -1088,7 +1101,6 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
struct kvm_device_attr *attr)
{
struct kvm_s390_vm_cpu_feat data;
- int ret = -EBUSY;
if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
return -EFAULT;
@@ -1098,13 +1110,18 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
return -EINVAL;
mutex_lock(&kvm->lock);
- if (!atomic_read(&kvm->online_vcpus)) {
- bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
- KVM_S390_VM_CPU_FEAT_NR_BITS);
- ret = 0;
+ if (kvm->created_vcpus) {
+ mutex_unlock(&kvm->lock);
+ return -EBUSY;
}
+ bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
+ KVM_S390_VM_CPU_FEAT_NR_BITS);
mutex_unlock(&kvm->lock);
- return ret;
+ VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
+ data.feat[0],
+ data.feat[1],
+ data.feat[2]);
+ return 0;
}
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
@@ -1206,6 +1223,10 @@ static int kvm_s390_get_processor_feat(struct kvm *kvm,
KVM_S390_VM_CPU_FEAT_NR_BITS);
if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
return -EFAULT;
+ VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
+ data.feat[0],
+ data.feat[1],
+ data.feat[2]);
return 0;
}
@@ -1219,6 +1240,10 @@ static int kvm_s390_get_machine_feat(struct kvm *kvm,
KVM_S390_VM_CPU_FEAT_NR_BITS);
if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
return -EFAULT;
+ VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
+ data.feat[0],
+ data.feat[1],
+ data.feat[2]);
return 0;
}
@@ -1911,6 +1936,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (!kvm->arch.dbf)
goto out_err;
+ BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
kvm->arch.sie_page2 =
(struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!kvm->arch.sie_page2)
@@ -1981,6 +2007,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
spin_lock_init(&kvm->arch.start_stop_lock);
kvm_s390_vsie_init(kvm);
+ kvm_s390_gisa_init(kvm);
KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
return 0;
@@ -2043,6 +2070,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_free_vcpus(kvm);
sca_dispose(kvm);
debug_unregister(kvm->arch.dbf);
+ kvm_s390_gisa_destroy(kvm);
free_page((unsigned long)kvm->arch.sie_page2);
if (!kvm_is_ucontrol(kvm))
gmap_remove(kvm->arch.gmap);
@@ -2314,7 +2342,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
gmap_enable(vcpu->arch.enabled_gmap);
- atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
__start_cpu_timer_accounting(vcpu);
vcpu->cpu = cpu;
@@ -2325,7 +2353,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->cpu = -1;
if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
__stop_cpu_timer_accounting(vcpu);
- atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
vcpu->arch.enabled_gmap = gmap_get_enabled();
gmap_disable(vcpu->arch.enabled_gmap);
@@ -2422,9 +2450,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
CPUSTAT_STOPPED);
if (test_kvm_facility(vcpu->kvm, 78))
- atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
else if (test_kvm_facility(vcpu->kvm, 8))
- atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
kvm_s390_vcpu_setup_model(vcpu);
@@ -2456,12 +2484,17 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
if (test_kvm_facility(vcpu->kvm, 139))
vcpu->arch.sie_block->ecd |= ECD_MEF;
+ if (vcpu->arch.sie_block->gd) {
+ vcpu->arch.sie_block->eca |= ECA_AIV;
+ VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
+ vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
+ }
vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
| SDNXC;
vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
if (sclp.has_kss)
- atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
else
vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
@@ -2508,9 +2541,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
vcpu->arch.sie_block->icpua = id;
spin_lock_init(&vcpu->arch.local_int.lock);
- vcpu->arch.local_int.float_int = &kvm->arch.float_int;
- vcpu->arch.local_int.wq = &vcpu->wq;
- vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
+ vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa;
+ if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
+ vcpu->arch.sie_block->gd |= GISA_FORMAT1;
seqcount_init(&vcpu->arch.cputm_seqcount);
rc = kvm_vcpu_init(vcpu, kvm, id);
@@ -2567,7 +2600,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
* return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
- atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
cpu_relax();
}
@@ -2720,47 +2753,70 @@ static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
+ vcpu_load(vcpu);
memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
+ vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
+ vcpu_load(vcpu);
memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
+ vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
+ vcpu_load(vcpu);
+
memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
+
+ vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
+ vcpu_load(vcpu);
+
memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
+
+ vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- if (test_fp_ctl(fpu->fpc))
- return -EINVAL;
+ int ret = 0;
+
+ vcpu_load(vcpu);
+
+ if (test_fp_ctl(fpu->fpc)) {
+ ret = -EINVAL;
+ goto out;
+ }
vcpu->run->s.regs.fpc = fpu->fpc;
if (MACHINE_HAS_VX)
convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
(freg_t *) fpu->fprs);
else
memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
- return 0;
+
+out:
+ vcpu_put(vcpu);
+ return ret;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
+ vcpu_load(vcpu);
+
/* make sure we have the latest values */
save_fpu_regs();
if (MACHINE_HAS_VX)
@@ -2769,6 +2825,8 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
else
memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
fpu->fpc = vcpu->run->s.regs.fpc;
+
+ vcpu_put(vcpu);
return 0;
}
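
All converted ioctls above follow one bracket pattern: since the generic ioctl path no longer calls vcpu_load() on the architecture's behalf, each handler must load the vCPU itself and unwind through a single exit path so vcpu_put() runs on every return. A schematic (handler name and error value hypothetical):

    int kvm_arch_vcpu_ioctl_example(struct kvm_vcpu *vcpu, void *arg)
    {
            int ret = 0;

            vcpu_load(vcpu);                /* pin the vCPU to this thread */
            if (!arg) {
                    ret = -EINVAL;          /* every error path ... */
                    goto out;
            }
            /* ... access vcpu->run and the SIE block safely here ... */
    out:
            vcpu_put(vcpu);                 /* ... still drops the vCPU */
            return ret;
    }
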
@@ -2800,41 +2858,56 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
{
int rc = 0;
+ vcpu_load(vcpu);
+
vcpu->guest_debug = 0;
kvm_s390_clear_bp_data(vcpu);
- if (dbg->control & ~VALID_GUESTDBG_FLAGS)
- return -EINVAL;
- if (!sclp.has_gpere)
- return -EINVAL;
+ if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
+ rc = -EINVAL;
+ goto out;
+ }
+ if (!sclp.has_gpere) {
+ rc = -EINVAL;
+ goto out;
+ }
if (dbg->control & KVM_GUESTDBG_ENABLE) {
vcpu->guest_debug = dbg->control;
/* enforce guest PER */
- atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
rc = kvm_s390_import_bp_data(vcpu, dbg);
} else {
- atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
vcpu->arch.guestdbg.last_bp = 0;
}
if (rc) {
vcpu->guest_debug = 0;
kvm_s390_clear_bp_data(vcpu);
- atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
}
+out:
+ vcpu_put(vcpu);
return rc;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
+ int ret;
+
+ vcpu_load(vcpu);
+
/* CHECK_STOP and LOAD are not supported yet */
- return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
- KVM_MP_STATE_OPERATING;
+ ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
+ KVM_MP_STATE_OPERATING;
+
+ vcpu_put(vcpu);
+ return ret;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
@@ -2842,6 +2915,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
{
int rc = 0;
+ vcpu_load(vcpu);
+
/* user space knows about this interface - let it control the state */
vcpu->kvm->arch.user_cpu_state_ctrl = 1;
@@ -2859,12 +2934,13 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
rc = -ENXIO;
}
+ vcpu_put(vcpu);
return rc;
}
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
- return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
+ return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
@@ -2900,8 +2976,7 @@ retry:
if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
if (!ibs_enabled(vcpu)) {
trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
- atomic_or(CPUSTAT_IBS,
- &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
}
goto retry;
}
@@ -2909,8 +2984,7 @@ retry:
if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
if (ibs_enabled(vcpu)) {
trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
- atomic_andnot(CPUSTAT_IBS,
- &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
}
goto retry;
}
@@ -3390,9 +3464,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (kvm_run->immediate_exit)
return -EINTR;
+ vcpu_load(vcpu);
+
if (guestdbg_exit_pending(vcpu)) {
kvm_s390_prepare_debug_exit(vcpu);
- return 0;
+ rc = 0;
+ goto out;
}
kvm_sigset_activate(vcpu);
@@ -3402,7 +3479,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
} else if (is_vcpu_stopped(vcpu)) {
pr_err_ratelimited("can't run stopped vcpu %d\n",
vcpu->vcpu_id);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
sync_regs(vcpu, kvm_run);
@@ -3432,6 +3510,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_sigset_deactivate(vcpu);
vcpu->stat.exit_userspace++;
+out:
+ vcpu_put(vcpu);
return rc;
}
@@ -3560,7 +3640,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
__disable_ibs_on_all_vcpus(vcpu->kvm);
}
- atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
/*
* Another VCPU might have used IBS while we were offline.
* Let's play safe and flush the VCPU at startup.
@@ -3586,7 +3666,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
kvm_s390_clear_stop_irq(vcpu);
- atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
__disable_ibs_on_vcpu(vcpu);
for (i = 0; i < online_vcpus; i++) {
@@ -3693,36 +3773,45 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
return r;
}
-long kvm_arch_vcpu_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
- int idx;
- long r;
switch (ioctl) {
case KVM_S390_IRQ: {
struct kvm_s390_irq s390irq;
- r = -EFAULT;
if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
- break;
- r = kvm_s390_inject_vcpu(vcpu, &s390irq);
- break;
+ return -EFAULT;
+ return kvm_s390_inject_vcpu(vcpu, &s390irq);
}
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
struct kvm_s390_irq s390irq;
- r = -EFAULT;
if (copy_from_user(&s390int, argp, sizeof(s390int)))
- break;
+ return -EFAULT;
if (s390int_to_s390irq(&s390int, &s390irq))
return -EINVAL;
- r = kvm_s390_inject_vcpu(vcpu, &s390irq);
- break;
+ return kvm_s390_inject_vcpu(vcpu, &s390irq);
+ }
}
+ return -ENOIOCTLCMD;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ int idx;
+ long r;
+
+ vcpu_load(vcpu);
+
+ switch (ioctl) {
case KVM_S390_STORE_STATUS:
idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_s390_vcpu_store_status(vcpu, arg);
@@ -3847,6 +3936,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
default:
r = -ENOTTY;
}
+
+ vcpu_put(vcpu);
return r;
}
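
With HAVE_KVM_VCPU_ASYNC_IOCTL selected (see the Kconfig hunk above), the generic dispatcher tries the async hook before taking the vCPU mutex, so interrupt injection is never serialized behind a vCPU that is stuck inside another ioctl. A schematic of the generic-side ordering, quoted from memory of virt/kvm/kvm_main.c and therefore only a sketch:

    /* generic kvm_vcpu_ioctl(), abbreviated */
    r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
    if (r != -ENOIOCTLCMD)
            return r;                       /* handled without vcpu_load */
    if (mutex_lock_killable(&vcpu->mutex))
            return -EINTR;
    r = kvm_arch_vcpu_ioctl(filp, ioctl, arg); /* does vcpu_load itself */
    mutex_unlock(&vcpu->mutex);
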
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 5e46ba429bcb..bd31b37b0e6f 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -47,14 +47,29 @@ do { \
d_args); \
} while (0)
+static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
+{
+ atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
+}
+
+static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
+{
+ atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
+}
+
+static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
+{
+ return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
+}
+
static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
- return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
+ return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED);
}
static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
- return test_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+ return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.float_int.idle_mask);
}
static inline int kvm_is_ucontrol(struct kvm *kvm)
@@ -367,6 +382,9 @@ int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
__u8 __user *buf, int len);
+void kvm_s390_gisa_init(struct kvm *kvm);
+void kvm_s390_gisa_clear(struct kvm *kvm);
+void kvm_s390_gisa_destroy(struct kvm *kvm);
/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
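
One subtlety of the new helpers: kvm_s390_test_cpuflags() checks that all requested bits are set, not any of them, so multi-flag tests are conjunctive. A stand-alone demonstration with plain integers:

    #include <assert.h>

    static int test_flags(unsigned int word, unsigned int flags)
    {
            return (word & flags) == flags; /* all bits, not any bit */
    }

    int main(void)
    {
            assert(test_flags(0x3, 0x1));   /* requested subset present */
            assert(!test_flags(0x1, 0x3));  /* 0x2 missing -> false */
            return 0;
    }
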
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 0714bfa56da0..c4c4e157c036 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -2,7 +2,7 @@
/*
* handling privileged instructions
*
- * Copyright IBM Corp. 2008, 2013
+ * Copyright IBM Corp. 2008, 2018
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@@ -34,6 +34,8 @@
static int handle_ri(struct kvm_vcpu *vcpu)
{
+ vcpu->stat.instruction_ri++;
+
if (test_kvm_facility(vcpu->kvm, 64)) {
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
vcpu->arch.sie_block->ecb3 |= ECB3_RI;
@@ -53,6 +55,8 @@ int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
static int handle_gs(struct kvm_vcpu *vcpu)
{
+ vcpu->stat.instruction_gs++;
+
if (test_kvm_facility(vcpu->kvm, 133)) {
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
preempt_disable();
@@ -85,6 +89,8 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
u8 ar;
u64 op2, val;
+ vcpu->stat.instruction_sck++;
+
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -203,14 +209,14 @@ int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
trace_kvm_s390_skey_related_inst(vcpu);
if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
- !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS))
+ !kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
return rc;
rc = s390_enable_skey();
VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
if (!rc) {
- if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)
- atomic_andnot(CPUSTAT_KSS, &sie_block->cpuflags);
+ if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
+ kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
else
sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
ICTL_RRBE);
@@ -222,7 +228,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
{
int rc;
- vcpu->stat.instruction_storage_key++;
rc = kvm_s390_skey_check_enable(vcpu);
if (rc)
return rc;
@@ -242,6 +247,8 @@ static int handle_iske(struct kvm_vcpu *vcpu)
int reg1, reg2;
int rc;
+ vcpu->stat.instruction_iske++;
+
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -274,6 +281,8 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
int reg1, reg2;
int rc;
+ vcpu->stat.instruction_rrbe++;
+
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -312,6 +321,8 @@ static int handle_sske(struct kvm_vcpu *vcpu)
int reg1, reg2;
int rc;
+ vcpu->stat.instruction_sske++;
+
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -392,6 +403,8 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
gpa_t addr;
int reg2;
+ vcpu->stat.instruction_tb++;
+
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -424,6 +437,8 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
u64 addr;
u8 ar;
+ vcpu->stat.instruction_tpi++;
+
addr = kvm_s390_get_base_disp_s(vcpu, &ar);
if (addr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
@@ -484,6 +499,8 @@ static int handle_tsch(struct kvm_vcpu *vcpu)
struct kvm_s390_interrupt_info *inti = NULL;
const u64 isc_mask = 0xffUL << 24; /* all iscs set */
+ vcpu->stat.instruction_tsch++;
+
/* a valid schid has at least one bit set */
if (vcpu->run->s.regs.gprs[1])
inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
@@ -527,6 +544,7 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->ipa == 0xb235)
return handle_tsch(vcpu);
/* Handle in userspace. */
+ vcpu->stat.instruction_io_other++;
return -EOPNOTSUPP;
} else {
/*
@@ -592,6 +610,8 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
int rc;
u8 ar;
+ vcpu->stat.instruction_lpsw++;
+
if (gpsw->mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -619,6 +639,8 @@ static int handle_lpswe(struct kvm_vcpu *vcpu)
int rc;
u8 ar;
+ vcpu->stat.instruction_lpswe++;
+
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -828,6 +850,8 @@ static int handle_epsw(struct kvm_vcpu *vcpu)
{
int reg1, reg2;
+ vcpu->stat.instruction_epsw++;
+
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
/* This basically extracts the mask half of the psw. */
@@ -1332,6 +1356,8 @@ static int handle_sckpf(struct kvm_vcpu *vcpu)
{
u32 value;
+ vcpu->stat.instruction_sckpf++;
+
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -1347,6 +1373,8 @@ static int handle_sckpf(struct kvm_vcpu *vcpu)
static int handle_ptff(struct kvm_vcpu *vcpu)
{
+ vcpu->stat.instruction_ptff++;
+
/* we don't emulate any control instructions yet */
kvm_s390_set_psw_cc(vcpu, 3);
return 0;
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index c1f5cde2c878..683036c1c92a 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -20,22 +20,18 @@
static int __sigp_sense(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
u64 *reg)
{
- struct kvm_s390_local_interrupt *li;
- int cpuflags;
+ const bool stopped = kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED);
int rc;
int ext_call_pending;
- li = &dst_vcpu->arch.local_int;
-
- cpuflags = atomic_read(li->cpuflags);
ext_call_pending = kvm_s390_ext_call_pending(dst_vcpu);
- if (!(cpuflags & CPUSTAT_STOPPED) && !ext_call_pending)
+ if (!stopped && !ext_call_pending)
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
else {
*reg &= 0xffffffff00000000UL;
if (ext_call_pending)
*reg |= SIGP_STATUS_EXT_CALL_PENDING;
- if (cpuflags & CPUSTAT_STOPPED)
+ if (stopped)
*reg |= SIGP_STATUS_STOPPED;
rc = SIGP_CC_STATUS_STORED;
}
@@ -208,11 +204,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu,
u32 addr, u64 *reg)
{
- int flags;
int rc;
- flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
- if (!(flags & CPUSTAT_STOPPED)) {
+ if (!kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_STOPPED)) {
*reg &= 0xffffffff00000000UL;
*reg |= SIGP_STATUS_INCORRECT_STATE;
return SIGP_CC_STATUS_STORED;
@@ -231,7 +225,6 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu,
static int __sigp_sense_running(struct kvm_vcpu *vcpu,
struct kvm_vcpu *dst_vcpu, u64 *reg)
{
- struct kvm_s390_local_interrupt *li;
int rc;
if (!test_kvm_facility(vcpu->kvm, 9)) {
@@ -240,8 +233,7 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu,
return SIGP_CC_STATUS_STORED;
}
- li = &dst_vcpu->arch.local_int;
- if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+ if (kvm_s390_test_cpuflags(dst_vcpu, CPUSTAT_RUNNING)) {
/* running */
rc = SIGP_CC_ORDER_CODE_ACCEPTED;
} else {
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 751348348477..ec772700ff96 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -28,13 +28,23 @@ struct vsie_page {
* the same offset as that in struct sie_page!
*/
struct mcck_volatile_info mcck_info; /* 0x0200 */
- /* the pinned originial scb */
+ /*
+ * The pinned original scb. Be aware that other VCPUs can modify
+ * it while we read from it. Values that are used for conditions or
+ * are reused conditionally should be accessed via READ_ONCE.
+ */
struct kvm_s390_sie_block *scb_o; /* 0x0218 */
/* the shadow gmap in use by the vsie_page */
struct gmap *gmap; /* 0x0220 */
/* address of the last reported fault to guest2 */
unsigned long fault_addr; /* 0x0228 */
- __u8 reserved[0x0700 - 0x0230]; /* 0x0230 */
+ /* calculated guest addresses of satellite control blocks */
+ gpa_t sca_gpa; /* 0x0230 */
+ gpa_t itdba_gpa; /* 0x0238 */
+ gpa_t gvrd_gpa; /* 0x0240 */
+ gpa_t riccbd_gpa; /* 0x0248 */
+ gpa_t sdnx_gpa; /* 0x0250 */
+ __u8 reserved[0x0700 - 0x0258]; /* 0x0258 */
struct kvm_s390_crypto_cb crycb; /* 0x0700 */
__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE]; /* 0x0800 */
};
@@ -140,12 +150,13 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
- u32 crycb_addr = scb_o->crycbd & 0x7ffffff8U;
+ const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
+ const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
unsigned long *b1, *b2;
u8 ecb3_flags;
scb_s->crycbd = 0;
- if (!(scb_o->crycbd & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
+ if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
return 0;
/* format-1 is supported with message-security-assist extension 3 */
if (!test_kvm_facility(vcpu->kvm, 76))
@@ -183,12 +194,15 @@ static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
+ /* READ_ONCE does not work on bitfields - use a temporary variable */
+ const uint32_t __new_ibc = scb_o->ibc;
+ const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
scb_s->ibc = 0;
/* ibc installed in g2 and requested for g3 */
- if (vcpu->kvm->arch.model.ibc && (scb_o->ibc & 0x0fffU)) {
- scb_s->ibc = scb_o->ibc & 0x0fffU;
+ if (vcpu->kvm->arch.model.ibc && new_ibc) {
+ scb_s->ibc = new_ibc;
/* take care of the minimum ibc level of the machine */
if (scb_s->ibc < min_ibc)
scb_s->ibc = min_ibc;
@@ -259,6 +273,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ /* READ_ONCE does not work on bitfields - use a temporary variable */
+ const uint32_t __new_prefix = scb_o->prefix;
+ const uint32_t new_prefix = READ_ONCE(__new_prefix);
+ const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
bool had_tx = scb_s->ecb & ECB_TE;
unsigned long new_mso = 0;
int rc;
@@ -306,14 +324,14 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
scb_s->icpua = scb_o->icpua;
if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
- new_mso = scb_o->mso & 0xfffffffffff00000UL;
+ new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
/* if the hva of the prefix changes, we have to remap the prefix */
- if (scb_s->mso != new_mso || scb_s->prefix != scb_o->prefix)
+ if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
prefix_unmapped(vsie_page);
/* SIE will do mso/msl validity and exception checks for us */
scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
scb_s->mso = new_mso;
- scb_s->prefix = scb_o->prefix;
+ scb_s->prefix = new_prefix;
/* We have to definitely flush the tlb if this scb never ran */
if (scb_s->ihcpu != 0xffffU)
@@ -325,11 +343,11 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
/* transactional execution */
- if (test_kvm_facility(vcpu->kvm, 73)) {
+ if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
/* remap the prefix if tx is toggled on */
- if ((scb_o->ecb & ECB_TE) && !had_tx)
+ if (!had_tx)
prefix_unmapped(vsie_page);
- scb_s->ecb |= scb_o->ecb & ECB_TE;
+ scb_s->ecb |= ECB_TE;
}
/* branch prediction */
if (test_kvm_facility(vcpu->kvm, 82))
@@ -473,46 +491,42 @@ static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
- struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
hpa_t hpa;
- gpa_t gpa;
hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
if (hpa) {
- gpa = scb_o->scaol & ~0xfUL;
- if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
- gpa |= (u64) scb_o->scaoh << 32;
- unpin_guest_page(vcpu->kvm, gpa, hpa);
+ unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
+ vsie_page->sca_gpa = 0;
scb_s->scaol = 0;
scb_s->scaoh = 0;
}
hpa = scb_s->itdba;
if (hpa) {
- gpa = scb_o->itdba & ~0xffUL;
- unpin_guest_page(vcpu->kvm, gpa, hpa);
+ unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
+ vsie_page->itdba_gpa = 0;
scb_s->itdba = 0;
}
hpa = scb_s->gvrd;
if (hpa) {
- gpa = scb_o->gvrd & ~0x1ffUL;
- unpin_guest_page(vcpu->kvm, gpa, hpa);
+ unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
+ vsie_page->gvrd_gpa = 0;
scb_s->gvrd = 0;
}
hpa = scb_s->riccbd;
if (hpa) {
- gpa = scb_o->riccbd & ~0x3fUL;
- unpin_guest_page(vcpu->kvm, gpa, hpa);
+ unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
+ vsie_page->riccbd_gpa = 0;
scb_s->riccbd = 0;
}
hpa = scb_s->sdnxo;
if (hpa) {
- gpa = scb_o->sdnxo;
- unpin_guest_page(vcpu->kvm, gpa, hpa);
+ unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
+ vsie_page->sdnx_gpa = 0;
scb_s->sdnxo = 0;
}
}
@@ -539,9 +553,9 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
gpa_t gpa;
int rc = 0;
- gpa = scb_o->scaol & ~0xfUL;
+ gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
- gpa |= (u64) scb_o->scaoh << 32;
+ gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
if (gpa) {
if (!(gpa & ~0x1fffUL))
rc = set_validity_icpt(scb_s, 0x0038U);
@@ -557,11 +571,12 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
}
if (rc)
goto unpin;
+ vsie_page->sca_gpa = gpa;
scb_s->scaoh = (u32)((u64)hpa >> 32);
scb_s->scaol = (u32)(u64)hpa;
}
- gpa = scb_o->itdba & ~0xffUL;
+ gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
if (gpa && (scb_s->ecb & ECB_TE)) {
if (!(gpa & ~0x1fffU)) {
rc = set_validity_icpt(scb_s, 0x0080U);
@@ -573,10 +588,11 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
rc = set_validity_icpt(scb_s, 0x0080U);
goto unpin;
}
+ vsie_page->itdba_gpa = gpa;
scb_s->itdba = hpa;
}
- gpa = scb_o->gvrd & ~0x1ffUL;
+ gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
if (!(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x1310U);
@@ -591,10 +607,11 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
rc = set_validity_icpt(scb_s, 0x1310U);
goto unpin;
}
+ vsie_page->gvrd_gpa = gpa;
scb_s->gvrd = hpa;
}
- gpa = scb_o->riccbd & ~0x3fUL;
+ gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
if (gpa && (scb_s->ecb3 & ECB3_RI)) {
if (!(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x0043U);
@@ -607,13 +624,14 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
goto unpin;
}
/* Validity 0x0044 will be checked by SIE */
+ vsie_page->riccbd_gpa = gpa;
scb_s->riccbd = hpa;
}
if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
unsigned long sdnxc;
- gpa = scb_o->sdnxo & ~0xfUL;
- sdnxc = scb_o->sdnxo & 0xfUL;
+ gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
+ sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
if (!gpa || !(gpa & ~0x1fffUL)) {
rc = set_validity_icpt(scb_s, 0x10b0U);
goto unpin;
@@ -634,6 +652,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
rc = set_validity_icpt(scb_s, 0x10b0U);
goto unpin;
}
+ vsie_page->sdnx_gpa = gpa;
scb_s->sdnxo = hpa | sdnxc;
}
return 0;
@@ -778,7 +797,7 @@ static void retry_vsie_icpt(struct vsie_page *vsie_page)
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
- __u32 fac = vsie_page->scb_o->fac & 0x7ffffff8U;
+ __u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;
if (fac && test_kvm_facility(vcpu->kvm, 7)) {
retry_vsie_icpt(vsie_page);
@@ -904,7 +923,7 @@ static void register_shadow_scb(struct kvm_vcpu *vcpu,
* External calls have to lead to a kick of the vcpu and
* therefore the vsie -> Simulate Wait state.
*/
- atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
/*
* We have to adjust the g3 epoch by the g2 epoch. The epoch will
* automatically be adjusted on tod clock changes via kvm_sync_clock.
@@ -926,7 +945,7 @@ static void register_shadow_scb(struct kvm_vcpu *vcpu,
*/
static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
{
- atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+ kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
WRITE_ONCE(vcpu->arch.vsie_block, NULL);
}
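The vsie.c changes implement one recurring pattern: fields of the original control block (scb_o) can change under us, so every value that feeds a condition or is used twice is snapshotted exactly once. Since READ_ONCE() cannot be applied to a bitfield, the bitfield is first copied into a plain temporary; the copy is the single read, and READ_ONCE() on the temporary documents that only the snapshot is used from then on. A self-contained sketch of the idiom (field name illustrative):

	struct shared { u32 ibc : 12; };	/* hypothetical bitfield */

	static u32 snapshot_ibc(struct shared *s)
	{
		/* READ_ONCE does not work on bitfields - copy first */
		const u32 __ibc = s->ibc;
		const u32 ibc = READ_ONCE(__ibc);

		return ibc & 0x0fffU;	/* masked once, reused safely */
	}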
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 05d459b638f5..2c55a2b9d6c6 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -815,27 +815,17 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
* @ptl: pointer to the spinlock pointer
*
* Returns a pointer to the locked pte for a guest address, or NULL
- *
- * Note: Can also be called for shadow gmaps.
*/
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
spinlock_t **ptl)
{
unsigned long *table;
- if (gmap_is_shadow(gmap))
- spin_lock(&gmap->guest_table_lock);
+ BUG_ON(gmap_is_shadow(gmap));
/* Walk the gmap page table, lock and get pte pointer */
table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
- if (!table || *table & _SEGMENT_ENTRY_INVALID) {
- if (gmap_is_shadow(gmap))
- spin_unlock(&gmap->guest_table_lock);
+ if (!table || *table & _SEGMENT_ENTRY_INVALID)
return NULL;
- }
- if (gmap_is_shadow(gmap)) {
- *ptl = &gmap->guest_table_lock;
- return pte_offset_map((pmd_t *) table, gaddr);
- }
return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
@@ -889,8 +879,6 @@ static void gmap_pte_op_end(spinlock_t *ptl)
* -EFAULT if gaddr is invalid (or mapping for shadows is missing).
*
* Called with sg->mm->mmap_sem in read.
- *
- * Note: Can also be called for shadow gmaps.
*/
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
unsigned long len, int prot, unsigned long bits)
@@ -900,6 +888,7 @@ static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
pte_t *ptep;
int rc;
+ BUG_ON(gmap_is_shadow(gmap));
while (len) {
rc = -EAGAIN;
ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
@@ -960,7 +949,8 @@ EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
* @val: pointer to the unsigned long value to return
*
* Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
- * if reading using the virtual address failed.
+ * if reading using the virtual address failed, and -EINVAL if called
+ * on a shadow gmap.
*
* Called with gmap->mm->mmap_sem in read.
*/
@@ -971,6 +961,9 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
pte_t *ptep, pte;
int rc;
+ if (gmap_is_shadow(gmap))
+ return -EINVAL;
+
while (1) {
rc = -EAGAIN;
ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
@@ -1028,18 +1021,17 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
}
/**
- * gmap_protect_rmap - modify access rights to memory and create an rmap
+ * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
* @sg: pointer to the shadow guest address space structure
* @raddr: rmap address in the shadow gmap
* @paddr: address in the parent guest address space
* @len: length of the memory area to protect
- * @prot: indicates access rights: none, read-only or read-write
*
* Returns 0 if successfully protected and the rmap was created, -ENOMEM
* if out of memory and -EFAULT if paddr is invalid.
*/
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
- unsigned long paddr, unsigned long len, int prot)
+ unsigned long paddr, unsigned long len)
{
struct gmap *parent;
struct gmap_rmap *rmap;
@@ -1067,7 +1059,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
ptep = gmap_pte_op_walk(parent, paddr, &ptl);
if (ptep) {
spin_lock(&sg->guest_table_lock);
- rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
+ rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
PGSTE_VSIE_BIT);
if (!rc)
gmap_insert_rmap(sg, vmaddr, rmap);
@@ -1077,7 +1069,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
radix_tree_preload_end();
if (rc) {
kfree(rmap);
- rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
+ rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
if (rc)
return rc;
continue;
@@ -1616,7 +1608,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
origin = r2t & _REGION_ENTRY_ORIGIN;
offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
- rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
+ rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
spin_lock(&sg->guest_table_lock);
if (!rc) {
table = gmap_table_walk(sg, saddr, 4);
@@ -1699,7 +1691,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
origin = r3t & _REGION_ENTRY_ORIGIN;
offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
- rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
+ rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
spin_lock(&sg->guest_table_lock);
if (!rc) {
table = gmap_table_walk(sg, saddr, 3);
@@ -1783,7 +1775,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
origin = sgt & _REGION_ENTRY_ORIGIN;
offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
- rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
+ rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
spin_lock(&sg->guest_table_lock);
if (!rc) {
table = gmap_table_walk(sg, saddr, 2);
@@ -1902,7 +1894,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
/* Make pgt read-only in parent gmap page table (not the pgste) */
raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
- rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
+ rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
spin_lock(&sg->guest_table_lock);
if (!rc) {
table = gmap_table_walk(sg, saddr, 1);
@@ -2005,7 +1997,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_page);
* Called with sg->parent->shadow_lock.
*/
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
- unsigned long gaddr, pte_t *pte)
+ unsigned long gaddr)
{
struct gmap_rmap *rmap, *rnext, *head;
unsigned long start, end, bits, raddr;
@@ -2090,7 +2082,7 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
spin_lock(&gmap->shadow_lock);
list_for_each_entry_safe(sg, next,
&gmap->children, list)
- gmap_shadow_notify(sg, vmaddr, gaddr, pte);
+ gmap_shadow_notify(sg, vmaddr, gaddr);
spin_unlock(&gmap->shadow_lock);
}
if (bits & PGSTE_IN_BIT)
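The gmap.c changes split one contract into two idioms: internal walkers such as gmap_pte_op_walk() now assert with BUG_ON() that they are never handed a shadow gmap, while the exported gmap_read_table() fails such calls gracefully with -EINVAL. A hypothetical caller honoring the new contract (name and wrapper are illustrative, not from the source):

	static int read_guest_u64(struct gmap *gmap, unsigned long gaddr,
				  unsigned long *val)
	{
		/* Shadow gmaps must take the vsie path instead. */
		if (gmap_is_shadow(gmap))
			return -EINVAL;
		return gmap_read_table(gmap, gaddr, val);
	}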
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index c4d162a94be9..d5f9a2d1da1b 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -130,6 +130,7 @@ void mconsole_proc(struct mc_request *req)
struct file *file;
int first_chunk = 1;
char *ptr = req->request.data;
+ loff_t pos = 0;
ptr += strlen("proc");
ptr = skip_spaces(ptr);
@@ -148,7 +149,7 @@ void mconsole_proc(struct mc_request *req)
}
do {
- len = kernel_read(file, buf, PAGE_SIZE - 1, &file->f_pos);
+ len = kernel_read(file, buf, PAGE_SIZE - 1, &pos);
if (len < 0) {
mconsole_reply(req, "Read of file failed", 1, 0);
goto out_free;
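The mconsole fix reads through a local loff_t instead of updating file->f_pos behind the VFS's back, which is the pattern kernel_read() expects: the helper advances the position it is handed. A minimal sketch of the resulting loop shape (buffer handling illustrative):

	loff_t pos = 0;
	ssize_t len;
	char buf[128];

	do {
		len = kernel_read(file, buf, sizeof(buf) - 1, &pos);
		if (len < 0)
			break;		/* report the read failure */
		buf[len] = '\0';
		/* consume len bytes; pos has already been advanced */
	} while (len > 0);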
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index abee6d2b9311..16c2c022540d 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -900,6 +900,9 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
hyperv_vector_handler)
+BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR,
+ hyperv_reenlightenment_intr)
+
#endif /* CONFIG_HYPERV */
ENTRY(page_fault)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 4a9bef6aca34..30c8c5344c4a 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1136,6 +1136,9 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
hyperv_callback_vector hyperv_vector_handler
+
+apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
+ hyperv_reenlightenment_vector hyperv_reenlightenment_intr
#endif /* CONFIG_HYPERV */
idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index a0a206556919..2edc49e7409b 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -18,6 +18,8 @@
*/
#include <linux/types.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
#include <asm/hypervisor.h>
#include <asm/hyperv.h>
#include <asm/mshyperv.h>
@@ -37,6 +39,7 @@ struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
return tsc_pg;
}
+EXPORT_SYMBOL_GPL(hv_get_tsc_page);
static u64 read_hv_clock_tsc(struct clocksource *arg)
{
@@ -101,6 +104,115 @@ static int hv_cpu_init(unsigned int cpu)
return 0;
}
+static void (*hv_reenlightenment_cb)(void);
+
+static void hv_reenlightenment_notify(struct work_struct *dummy)
+{
+ struct hv_tsc_emulation_status emu_status;
+
+ rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
+
+ /* Don't issue the callback if TSC accesses are not emulated */
+ if (hv_reenlightenment_cb && emu_status.inprogress)
+ hv_reenlightenment_cb();
+}
+static DECLARE_DELAYED_WORK(hv_reenlightenment_work, hv_reenlightenment_notify);
+
+void hyperv_stop_tsc_emulation(void)
+{
+ u64 freq;
+ struct hv_tsc_emulation_status emu_status;
+
+ rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
+ emu_status.inprogress = 0;
+ wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
+
+ rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
+ tsc_khz = div64_u64(freq, 1000);
+}
+EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);
+
+static inline bool hv_reenlightenment_available(void)
+{
+ /*
+ * Check for required features and privileges to make TSC frequency
+ * change notifications work.
+ */
+ return ms_hyperv.features & HV_X64_ACCESS_FREQUENCY_MSRS &&
+ ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE &&
+ ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT;
+}
+
+__visible void __irq_entry hyperv_reenlightenment_intr(struct pt_regs *regs)
+{
+ entering_ack_irq();
+
+ inc_irq_stat(irq_hv_reenlightenment_count);
+
+ schedule_delayed_work(&hv_reenlightenment_work, HZ/10);
+
+ exiting_irq();
+}
+
+void set_hv_tscchange_cb(void (*cb)(void))
+{
+ struct hv_reenlightenment_control re_ctrl = {
+ .vector = HYPERV_REENLIGHTENMENT_VECTOR,
+ .enabled = 1,
+ .target_vp = hv_vp_index[smp_processor_id()]
+ };
+ struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};
+
+ if (!hv_reenlightenment_available()) {
+ pr_warn("Hyper-V: reenlightenment support is unavailable\n");
+ return;
+ }
+
+ hv_reenlightenment_cb = cb;
+
+ /* Make sure callback is registered before we write to MSRs */
+ wmb();
+
+ wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
+ wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));
+}
+EXPORT_SYMBOL_GPL(set_hv_tscchange_cb);
+
+void clear_hv_tscchange_cb(void)
+{
+ struct hv_reenlightenment_control re_ctrl;
+
+ if (!hv_reenlightenment_available())
+ return;
+
+ rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
+ re_ctrl.enabled = 0;
+ wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
+
+ hv_reenlightenment_cb = NULL;
+}
+EXPORT_SYMBOL_GPL(clear_hv_tscchange_cb);
+
+static int hv_cpu_die(unsigned int cpu)
+{
+ struct hv_reenlightenment_control re_ctrl;
+ unsigned int new_cpu;
+
+ if (hv_reenlightenment_cb == NULL)
+ return 0;
+
+ rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
+ if (re_ctrl.target_vp == hv_vp_index[cpu]) {
+ /* Reassign to some other online CPU */
+ new_cpu = cpumask_any_but(cpu_online_mask, cpu);
+
+ re_ctrl.target_vp = hv_vp_index[new_cpu];
+ wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
+ }
+
+ return 0;
+}
+
/*
* This function is to be invoked early in the boot sequence after the
* hypervisor has been detected.
@@ -110,12 +222,19 @@ static int hv_cpu_init(unsigned int cpu)
*/
void hyperv_init(void)
{
- u64 guest_id;
+ u64 guest_id, required_msrs;
union hv_x64_msr_hypercall_contents hypercall_msr;
if (x86_hyper_type != X86_HYPER_MS_HYPERV)
return;
+ /* Absolutely required MSRs */
+ required_msrs = HV_X64_MSR_HYPERCALL_AVAILABLE |
+ HV_X64_MSR_VP_INDEX_AVAILABLE;
+
+ if ((ms_hyperv.features & required_msrs) != required_msrs)
+ return;
+
/* Allocate percpu VP index */
hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
GFP_KERNEL);
@@ -123,7 +242,7 @@ void hyperv_init(void)
return;
if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv_init:online",
- hv_cpu_init, NULL) < 0)
+ hv_cpu_init, hv_cpu_die) < 0)
goto free_vp_index;
/*
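The reenlightenment helpers above access the new 64-bit Hyper-V MSRs through small bitfield structs, type-punning them to u64 for rdmsrl()/wrmsrl(); this is sound because each struct (defined in the uapi header later in this diff) is exactly 64 bits wide. The read-modify-write shape, extracted as a sketch:

	struct hv_reenlightenment_control re_ctrl;

	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
	re_ctrl.enabled = 0;			/* edit one field */
	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);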
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 1d9199e1c2ad..0dfe4d3f74e2 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -210,6 +210,7 @@
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 51cc979dd364..7c341a74ec8c 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -38,6 +38,9 @@ typedef struct {
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
unsigned int irq_hv_callback_count;
#endif
+#if IS_ENABLED(CONFIG_HYPERV)
+ unsigned int irq_hv_reenlightenment_count;
+#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 67421f649cfa..e71c1120426b 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -103,7 +103,12 @@
#endif
#define MANAGED_IRQ_SHUTDOWN_VECTOR 0xef
-#define LOCAL_TIMER_VECTOR 0xee
+
+#if IS_ENABLED(CONFIG_HYPERV)
+#define HYPERV_REENLIGHTENMENT_VECTOR 0xee
+#endif
+
+#define LOCAL_TIMER_VECTOR 0xed
#define NR_VECTORS 256
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 516798431328..dd6f57a54a26 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -86,7 +86,7 @@
| X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
| X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
| X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
- | X86_CR4_SMAP | X86_CR4_PKE))
+ | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@@ -504,6 +504,7 @@ struct kvm_vcpu_arch {
int mp_state;
u64 ia32_misc_enable_msr;
u64 smbase;
+ u64 smi_count;
bool tpr_access_reporting;
u64 ia32_xss;
@@ -760,6 +761,15 @@ enum kvm_irqchip_mode {
KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
};
+struct kvm_sev_info {
+ bool active; /* SEV enabled guest */
+ unsigned int asid; /* ASID used for this guest */
+ unsigned int handle; /* SEV firmware handle */
+ int fd; /* SEV device fd */
+ unsigned long pages_locked; /* Number of pages locked */
+ struct list_head regions_list; /* List of registered regions */
+};
+
struct kvm_arch {
unsigned int n_used_mmu_pages;
unsigned int n_requested_mmu_pages;
@@ -847,6 +857,8 @@ struct kvm_arch {
bool x2apic_format;
bool x2apic_broadcast_quirk_disabled;
+
+ struct kvm_sev_info sev_info;
};
struct kvm_vm_stat {
@@ -883,7 +895,6 @@ struct kvm_vcpu_stat {
u64 request_irq_exits;
u64 irq_exits;
u64 host_state_reload;
- u64 efer_reload;
u64 fpu_reload;
u64 insn_emulation;
u64 insn_emulation_fail;
@@ -965,7 +976,7 @@ struct kvm_x86_ops {
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
- void (*tlb_flush)(struct kvm_vcpu *vcpu);
+ void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
void (*run)(struct kvm_vcpu *vcpu);
int (*handle_exit)(struct kvm_vcpu *vcpu);
@@ -1017,6 +1028,7 @@ struct kvm_x86_ops {
void (*handle_external_intr)(struct kvm_vcpu *vcpu);
bool (*mpx_supported)(void);
bool (*xsaves_supported)(void);
+ bool (*umip_emulated)(void);
int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
@@ -1079,6 +1091,10 @@ struct kvm_x86_ops {
int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
int (*enable_smi_window)(struct kvm_vcpu *vcpu);
+
+ int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
+ int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
+ int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
};
struct kvm_arch_async_pf {
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index b52af150cbd8..25283f7eb299 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -160,6 +160,7 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
#define hv_set_synint_state(int_num, val) wrmsrl(int_num, val)
void hyperv_callback_vector(void);
+void hyperv_reenlightenment_vector(void);
#ifdef CONFIG_TRACING
#define trace_hyperv_callback_vector hyperv_callback_vector
#endif
@@ -316,18 +317,27 @@ void hyper_alloc_mmu(void);
void hyperv_report_panic(struct pt_regs *regs, long err);
bool hv_is_hyperv_initialized(void);
void hyperv_cleanup(void);
+
+void hyperv_reenlightenment_intr(struct pt_regs *regs);
+void set_hv_tscchange_cb(void (*cb)(void));
+void clear_hv_tscchange_cb(void);
+void hyperv_stop_tsc_emulation(void);
#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
+static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
+static inline void clear_hv_tscchange_cb(void) {}
+static inline void hyperv_stop_tsc_emulation(void) {};
#endif /* CONFIG_HYPERV */
#ifdef CONFIG_HYPERV_TSCPAGE
struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
-static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
+static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
+ u64 *cur_tsc)
{
- u64 scale, offset, cur_tsc;
+ u64 scale, offset;
u32 sequence;
/*
@@ -358,7 +368,7 @@ static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
scale = READ_ONCE(tsc_pg->tsc_scale);
offset = READ_ONCE(tsc_pg->tsc_offset);
- cur_tsc = rdtsc_ordered();
+ *cur_tsc = rdtsc_ordered();
/*
* Make sure we read sequence after we read all other values
@@ -368,7 +378,14 @@ static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);
- return mul_u64_u64_shr(cur_tsc, scale, 64) + offset;
+ return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
+}
+
+static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
+{
+ u64 cur_tsc;
+
+ return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
}
#else
@@ -376,5 +393,12 @@ static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
return NULL;
}
+
+static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
+ u64 *cur_tsc)
+{
+ BUG();
+ return U64_MAX;
+}
#endif
#endif
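hv_read_tsc_page_tsc() now hands back both the scaled reference time (return value) and the raw TSC sample (*cur_tsc), and both are taken inside the same tsc_sequence-protected window, so they are mutually consistent. A usage sketch (locals illustrative):

	struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();
	u64 cur_tsc, ref;

	if (tsc_pg) {
		ref = hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
		/* ref and cur_tsc come from one consistent snapshot,
		 * so a caller can derive a (TSC -> reference time)
		 * correspondence without racing the hypervisor. */
	}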
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index e520a1e6fc11..c9084dedfcfa 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -397,6 +397,8 @@
#define MSR_K7_PERFCTR3 0xc0010007
#define MSR_K7_CLK_CTL 0xc001001b
#define MSR_K7_HWCR 0xc0010015
+#define MSR_K7_HWCR_SMMLOCK_BIT 0
+#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 8a3ee355b422..92015c65fa2a 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -22,4 +22,6 @@ int io_reserve_memtype(resource_size_t start, resource_size_t end,
void io_free_memtype(resource_size_t start, resource_size_t end);
+bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn);
+
#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 78dd9df88157..0487ac054870 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -146,6 +146,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
+#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
+#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
+
struct __attribute__ ((__packed__)) vmcb_seg {
u16 selector;
u16 attrib;
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 1a5bfead93b4..197c2e6c7376 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -40,6 +40,9 @@
*/
#define HV_X64_ACCESS_FREQUENCY_MSRS (1 << 11)
+/* AccessReenlightenmentControls privilege */
+#define HV_X64_ACCESS_REENLIGHTENMENT BIT(13)
+
/*
* Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
* and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available
@@ -234,6 +237,30 @@
#define HV_X64_MSR_CRASH_PARAMS \
(1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))
+/* TSC emulation after migration */
+#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
+
+struct hv_reenlightenment_control {
+ u64 vector:8;
+ u64 reserved1:8;
+ u64 enabled:1;
+ u64 reserved2:15;
+ u64 target_vp:32;
+};
+
+#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
+#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108
+
+struct hv_tsc_emulation_control {
+ u64 enabled:1;
+ u64 reserved:63;
+};
+
+struct hv_tsc_emulation_status {
+ u64 inprogress:1;
+ u64 reserved:63;
+};
+
#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 09cc06483bed..7a2ade4aa235 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -25,6 +25,7 @@
#define KVM_FEATURE_STEAL_TIME 5
#define KVM_FEATURE_PV_EOI 6
#define KVM_FEATURE_PV_UNHALT 7
+#define KVM_FEATURE_PV_TLB_FLUSH 9
/* The last 8 bits are used to indicate how to interpret the flags field
* in pvclock structure. If no bits are set, all flags are ignored.
@@ -51,6 +52,9 @@ struct kvm_steal_time {
__u32 pad[11];
};
+#define KVM_VCPU_PREEMPTED (1 << 0)
+#define KVM_VCPU_FLUSH_TLB (1 << 1)
+
#define KVM_CLOCK_PAIRING_WALLCLOCK 0
struct kvm_clock_pairing {
__s64 sec;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index ec3a286163c3..2aa92094b59d 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -36,6 +36,7 @@
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/efi-bgrt.h>
+#include <linux/serial_core.h>
#include <asm/e820/api.h>
#include <asm/irqdomain.h>
@@ -1625,6 +1626,8 @@ int __init acpi_boot_init(void)
if (!acpi_noirq)
x86_init.pci.init = pci_acpi_init;
+ /* Do not enable ACPI SPCR console by default */
+ acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
return 0;
}
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index ab1865342002..dc0ca8e29c75 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -2389,6 +2389,7 @@ static int __init apm_init(void)
if (HZ != 100)
idle_period = (idle_period * HZ) / 100;
if (idle_threshold < 100) {
+ cpuidle_poll_state_init(&apm_idle_driver);
if (!cpuidle_register_driver(&apm_idle_driver))
if (cpuidle_register_device(&apm_cpuidle_device))
cpuidle_unregister_driver(&apm_idle_driver);
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ea831c858195..5bddbdcbc4a3 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -556,6 +556,51 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
}
}
+static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
+{
+ u64 msr;
+
+ /*
+ * BIOS support is required for SME and SEV.
+ * For SME: If BIOS has enabled SME then adjust x86_phys_bits by
+ * the SME physical address space reduction value.
+ * If BIOS has not enabled SME then don't advertise the
+ * SME feature (set in scattered.c).
+ * For SEV: If BIOS has not enabled SEV then don't advertise the
+ * SEV feature (set in scattered.c).
+ *
+ * In all cases, since support for SME and SEV requires long mode,
+ * don't advertise the feature under CONFIG_X86_32.
+ */
+ if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
+ /* Check if memory encryption is enabled */
+ rdmsrl(MSR_K8_SYSCFG, msr);
+ if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ goto clear_all;
+
+ /*
+ * Always adjust physical address bits. Even though this
+ * will be a value above 32-bits this is still done for
+ * CONFIG_X86_32 so that accurate values are reported.
+ */
+ c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
+
+ if (IS_ENABLED(CONFIG_X86_32))
+ goto clear_all;
+
+ rdmsrl(MSR_K7_HWCR, msr);
+ if (!(msr & MSR_K7_HWCR_SMMLOCK))
+ goto clear_sev;
+
+ return;
+
+clear_all:
+ clear_cpu_cap(c, X86_FEATURE_SME);
+clear_sev:
+ clear_cpu_cap(c, X86_FEATURE_SEV);
+ }
+}
+
static void early_init_amd(struct cpuinfo_x86 *c)
{
u32 dummy;
@@ -627,26 +672,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
if (cpu_has_amd_erratum(c, amd_erratum_400))
set_cpu_bug(c, X86_BUG_AMD_E400);
- /*
- * BIOS support is required for SME. If BIOS has enabled SME then
- * adjust x86_phys_bits by the SME physical address space reduction
- * value. If BIOS has not enabled SME then don't advertise the
- * feature (set in scattered.c). Also, since the SME support requires
- * long mode, don't advertise the feature under CONFIG_X86_32.
- */
- if (cpu_has(c, X86_FEATURE_SME)) {
- u64 msr;
-
- /* Check if SME is enabled */
- rdmsrl(MSR_K8_SYSCFG, msr);
- if (msr & MSR_K8_SYSCFG_MEM_ENCRYPT) {
- c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
- if (IS_ENABLED(CONFIG_X86_32))
- clear_cpu_cap(c, X86_FEATURE_SME);
- } else {
- clear_cpu_cap(c, X86_FEATURE_SME);
- }
- }
+ early_detect_mem_encrypt(c);
}
static void init_amd_k8(struct cpuinfo_x86 *c)
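CPUID leaf 0x8000001f reports the SME physical-address reduction in bits 11:6 of EBX, which is what the (cpuid_ebx(0x8000001f) >> 6) & 0x3f expression above extracts. A worked example of the adjustment (numbers illustrative, not from the source):

	u32 ebx = cpuid_ebx(0x8000001f);
	unsigned int reduction = (ebx >> 6) & 0x3f;

	/* e.g. 48 physical bits with a 1-bit reduction for the
	 * encryption bit leaves 47 usable address bits */
	c->x86_phys_bits -= reduction;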
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 85eb5fc180c8..9340f41ce8d3 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -251,6 +251,12 @@ static void __init ms_hyperv_init_platform(void)
hyperv_setup_mmu_ops();
/* Setup the IDT for hypervisor callback */
alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+
+ /* Setup the IDT for reenlightenment notifications */
+ if (ms_hyperv.features & HV_X64_ACCESS_REENLIGHTENMENT)
+ alloc_intr_gate(HYPERV_REENLIGHTENMENT_VECTOR,
+ hyperv_reenlightenment_vector);
+
#endif
}
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 4075d2be5357..772c219b6889 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -30,6 +30,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
+ { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
{ 0, 0, 0, 0, 0 }
};
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 68e1867cca80..45fb4d2565f8 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -142,6 +142,15 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_puts(p, " Hypervisor callback interrupts\n");
}
#endif
+#if IS_ENABLED(CONFIG_HYPERV)
+ if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
+ seq_printf(p, "%*s: ", prec, "HRE");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ",
+ irq_stats(j)->irq_hv_reenlightenment_count);
+ seq_puts(p, " Hyper-V reenlightenment interrupts\n");
+ }
+#endif
seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index b40ffbf156c1..4e37d1a851a6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -498,6 +498,34 @@ static void __init kvm_apf_trap_init(void)
update_intr_gate(X86_TRAP_PF, async_page_fault);
}
+static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
+
+static void kvm_flush_tlb_others(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
+{
+ u8 state;
+ int cpu;
+ struct kvm_steal_time *src;
+ struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
+
+ cpumask_copy(flushmask, cpumask);
+ /*
+ * We have to call flush only on online vCPUs, and
+ * queue flush_on_enter for pre-empted vCPUs.
+ */
+ for_each_cpu(cpu, flushmask) {
+ src = &per_cpu(steal_time, cpu);
+ state = READ_ONCE(src->preempted);
+ if ((state & KVM_VCPU_PREEMPTED)) {
+ if (try_cmpxchg(&src->preempted, &state,
+ state | KVM_VCPU_FLUSH_TLB))
+ __cpumask_clear_cpu(cpu, flushmask);
+ }
+ }
+
+ native_flush_tlb_others(flushmask, info);
+}
+
static void __init kvm_guest_init(void)
{
int i;
@@ -517,6 +545,9 @@ static void __init kvm_guest_init(void)
pv_time_ops.steal_clock = kvm_steal_clock;
}
+ if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH))
+ pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
+
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
apic_set_eoi_write(kvm_guest_apic_eoi_write);
@@ -598,6 +629,22 @@ static __init int activate_jump_labels(void)
}
arch_initcall(activate_jump_labels);
+static __init int kvm_setup_pv_tlb_flush(void)
+{
+ int cpu;
+
+ if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) {
+ for_each_possible_cpu(cpu) {
+ zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
+ GFP_KERNEL, cpu_to_node(cpu));
+ }
+ pr_info("KVM setup pv remote TLB flush\n");
+ }
+
+ return 0;
+}
+arch_initcall(kvm_setup_pv_tlb_flush);
+
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
@@ -643,7 +690,7 @@ __visible bool __kvm_vcpu_is_preempted(long cpu)
{
struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
- return !!src->preempted;
+ return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
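kvm_flush_tlb_others() defers the flush for preempted vCPUs by atomically upgrading the steal-time preempted byte with KVM_VCPU_FLUSH_TLB; only if the cmpxchg wins is the CPU dropped from the IPI mask. The host half is not in this excerpt; a sketch of what it presumably does before re-running the vCPU, assuming the steal-time naming used here:

	/* Host side, sketched: honor a flush the guest queued while
	 * this vCPU was preempted, then clear the whole byte. */
	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
		kvm_vcpu_flush_tlb(vcpu, false);

This is why losing the cmpxchg race is safe: the target either gets the IPI or observes the flag at sched-in, never neither.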
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 3df51c287844..92fd433c50b9 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -81,6 +81,14 @@ config KVM_AMD
To compile this as a module, choose M here: the module
will be called kvm-amd.
+config KVM_AMD_SEV
+ def_bool y
+ bool "AMD Secure Encrypted Virtualization (SEV) support"
+ depends on KVM_AMD && X86_64
+ depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP
+ ---help---
+ Provides support for launching Encrypted VMs on AMD processors.
+
config KVM_MMU_AUDIT
bool "Audit KVM MMU"
depends on KVM && TRACEPOINTS
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 13f5d4217e4f..a0c5a69bc7c4 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -291,13 +291,18 @@ static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
{
switch (func) {
case 0:
- entry->eax = 1; /* only one leaf currently */
+ entry->eax = 7;
++*nent;
break;
case 1:
entry->ecx = F(MOVBE);
++*nent;
break;
+ case 7:
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ if (index == 0)
+ entry->ecx = F(RDPID);
+ ++*nent;
default:
break;
}
@@ -325,6 +330,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
+ unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
/* cpuid 1.edx */
const u32 kvm_cpuid_1_edx_x86_features =
@@ -363,7 +369,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
- 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
+ 0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
+ F(TOPOEXT);
/* cpuid 0x80000008.ebx */
const u32 kvm_cpuid_8000_0008_ebx_x86_features =
@@ -389,8 +396,9 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 7.0.ecx*/
const u32 kvm_cpuid_7_0_ecx_x86_features =
- F(AVX512VBMI) | F(LA57) | F(PKU) |
- 0 /*OSPKE*/ | F(AVX512_VPOPCNTDQ);
+ F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
+ F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
+ F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG);
/* cpuid 7.0.edx*/
const u32 kvm_cpuid_7_0_edx_x86_features =
@@ -476,6 +484,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ebx |= F(TSC_ADJUST);
entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
cpuid_mask(&entry->ecx, CPUID_7_ECX);
+ entry->ecx |= f_umip;
/* PKU is not yet implemented for shadow paging. */
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
entry->ecx &= ~F(PKU);
@@ -597,7 +606,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
(1 << KVM_FEATURE_ASYNC_PF) |
(1 << KVM_FEATURE_PV_EOI) |
(1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
- (1 << KVM_FEATURE_PV_UNHALT);
+ (1 << KVM_FEATURE_PV_UNHALT) |
+ (1 << KVM_FEATURE_PV_TLB_FLUSH);
if (sched_info_on())
entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
@@ -607,7 +617,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->edx = 0;
break;
case 0x80000000:
- entry->eax = min(entry->eax, 0x8000001a);
+ entry->eax = min(entry->eax, 0x8000001f);
break;
case 0x80000001:
entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 290ecf711aec..d91eaeb01034 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -3533,6 +3533,16 @@ static int em_cwd(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_rdpid(struct x86_emulate_ctxt *ctxt)
+{
+ u64 tsc_aux = 0;
+
+ if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
+ return emulate_gp(ctxt, 0);
+ ctxt->dst.val = tsc_aux;
+ return X86EMUL_CONTINUE;
+}
+
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 tsc = 0;
@@ -3652,17 +3662,27 @@ static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
+static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
{
- if (ctxt->modrm_reg > VCPU_SREG_GS)
- return emulate_ud(ctxt);
+ if (segment > VCPU_SREG_GS &&
+ (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
+ ctxt->ops->cpl(ctxt) > 0)
+ return emulate_gp(ctxt, 0);
- ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
+ ctxt->dst.val = get_segment_selector(ctxt, segment);
if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
ctxt->dst.bytes = 2;
return X86EMUL_CONTINUE;
}
+static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
+{
+ if (ctxt->modrm_reg > VCPU_SREG_GS)
+ return emulate_ud(ctxt);
+
+ return em_store_sreg(ctxt, ctxt->modrm_reg);
+}
+
static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
@@ -3678,6 +3698,11 @@ static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}
+static int em_sldt(struct x86_emulate_ctxt *ctxt)
+{
+ return em_store_sreg(ctxt, VCPU_SREG_LDTR);
+}
+
static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
@@ -3687,6 +3712,11 @@ static int em_lldt(struct x86_emulate_ctxt *ctxt)
return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}
+static int em_str(struct x86_emulate_ctxt *ctxt)
+{
+ return em_store_sreg(ctxt, VCPU_SREG_TR);
+}
+
static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
@@ -3739,6 +3769,10 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
{
struct desc_ptr desc_ptr;
+ if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
+ ctxt->ops->cpl(ctxt) > 0)
+ return emulate_gp(ctxt, 0);
+
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
get(ctxt, &desc_ptr);
@@ -3798,6 +3832,10 @@ static int em_lidt(struct x86_emulate_ctxt *ctxt)
static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
+ if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
+ ctxt->ops->cpl(ctxt) > 0)
+ return emulate_gp(ctxt, 0);
+
if (ctxt->dst.type == OP_MEM)
ctxt->dst.bytes = 2;
ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
@@ -4383,8 +4421,8 @@ static const struct opcode group5[] = {
};
static const struct opcode group6[] = {
- DI(Prot | DstMem, sldt),
- DI(Prot | DstMem, str),
+ II(Prot | DstMem, em_sldt, sldt),
+ II(Prot | DstMem, em_str, str),
II(Prot | Priv | SrcMem16, em_lldt, lldt),
II(Prot | Priv | SrcMem16, em_ltr, ltr),
N, N, N, N,
@@ -4415,10 +4453,20 @@ static const struct opcode group8[] = {
F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};
+/*
+ * The "memory" destination is actually always a register, since we come
+ * from the register case of group9.
+ */
+static const struct gprefix pfx_0f_c7_7 = {
+ N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
+};
+
+
static const struct group_dual group9 = { {
N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
- N, N, N, N, N, N, N, N,
+ N, N, N, N, N, N, N,
+ GP(0, &pfx_0f_c7_7),
} };
static const struct opcode group11[] = {
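Three emulation paths above (store segment register, store descriptor table pointer, SMSW) repeat the same UMIP gate: with CR4.UMIP set and CPL > 0, the instruction must raise #GP(0). Read as one predicate, the check could be factored like this hypothetical helper (not in the source):

	static bool umip_blocked(struct x86_emulate_ctxt *ctxt)
	{
		/* UMIP forbids user-mode reads of system state */
		return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
		       ctxt->ops->cpl(ctxt) > 0;
	}

with each site reduced to 'if (umip_blocked(ctxt)) return emulate_gp(ctxt, 0);'.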
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 5c24811e8b0b..f171051eecf3 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -79,7 +79,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
if (kvm_cpu_has_extint(v))
return 1;
- if (kvm_vcpu_apicv_active(v))
+ if (!is_guest_mode(v) && kvm_vcpu_apicv_active(v))
return 0;
return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index e2c1fb8d35ce..924ac8ce9d50 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -364,32 +364,41 @@ static u8 count_vectors(void *bitmap)
return count;
}
-int __kvm_apic_update_irr(u32 *pir, void *regs)
+bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
u32 i, vec;
- u32 pir_val, irr_val;
- int max_irr = -1;
+ u32 pir_val, irr_val, prev_irr_val;
+ int max_updated_irr;
+
+ max_updated_irr = -1;
+ *max_irr = -1;
for (i = vec = 0; i <= 7; i++, vec += 32) {
pir_val = READ_ONCE(pir[i]);
irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
if (pir_val) {
+ prev_irr_val = irr_val;
irr_val |= xchg(&pir[i], 0);
*((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
+ if (prev_irr_val != irr_val) {
+ max_updated_irr =
+ __fls(irr_val ^ prev_irr_val) + vec;
+ }
}
if (irr_val)
- max_irr = __fls(irr_val) + vec;
+ *max_irr = __fls(irr_val) + vec;
}
- return max_irr;
+ return ((max_updated_irr != -1) &&
+ (max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
-int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
+bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- return __kvm_apic_update_irr(pir, apic->regs);
+ return __kvm_apic_update_irr(pir, apic->regs, max_irr);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
@@ -581,7 +590,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
int highest_irr;
- if (kvm_x86_ops->sync_pir_to_irr && apic->vcpu->arch.apicv_active)
+ if (apic->vcpu->arch.apicv_active)
highest_irr = kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
else
highest_irr = apic_find_highest_irr(apic);
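The rewritten __kvm_apic_update_irr() tracks the highest vector it newly set, not just the highest pending one: irr_val ^ prev_irr_val isolates exactly the bits this PIR merge flipped, and __fls() of that mask plus the 32-vector chunk base names the top newly-delivered vector. A worked example of the bit trick (chunk 0, values illustrative):

	u32 prev = 0x00000011;	/* vectors 0 and 4 already pending */
	u32 curr = 0x00010011;	/* this merge also set vector 16 */

	int newly = __fls(curr ^ prev);	/* 0x00010000 -> bit 16 */

The function returns true only when that newly set vector is also the overall maximum, telling the caller the max IRR really changed in this merge.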
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 4b9935a38347..56c36014f7b7 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -75,8 +75,8 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int short_hand, unsigned int dest, int dest_mode);
-int __kvm_apic_update_irr(u32 *pir, void *regs);
-int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
+bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr);
+bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr);
void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
struct dest_map *dest_map);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2b8eb4da4d08..8eca1d04aeb8 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -42,6 +42,7 @@
#include <linux/kern_levels.h>
#include <asm/page.h>
+#include <asm/pat.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>
@@ -381,7 +382,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
-void kvm_mmu_clear_all_pte_masks(void)
+static void kvm_mmu_clear_all_pte_masks(void)
{
shadow_user_mask = 0;
shadow_accessed_mask = 0;
@@ -2708,7 +2709,18 @@ static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
if (pfn_valid(pfn))
- return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+ return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
+ /*
+ * Some reserved pages, such as those from NVDIMM
+ * DAX devices, are not for MMIO, and can be mapped
+ * with cached memory type for better performance.
+ * However, the above check misconceives those pages
+ * as MMIO, and results in KVM mapping them with UC
+ * memory type, which would hurt the performance.
+ * Therefore, we check the host memory type in addition
+ * and only treat UC/UC-/WC pages as MMIO.
+ */
+ (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));
return true;
}
@@ -4951,6 +4963,16 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
if (mmio_info_in_cache(vcpu, cr2, direct))
emulation_type = 0;
emulate:
+ /*
+ * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
+ * This can happen if a guest gets a page-fault on data access but the HW
+ * table walker is not able to read the instruction page (e.g instruction
+ * page is not present in memory). In those cases we simply restart the
+ * guest.
+ */
+ if (unlikely(insn && !insn_len))
+ return 1;
+
er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
switch (er) {
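kvm_is_mmio_pfn() now consults the host PAT before declaring a reserved page MMIO, so reserved-but-cacheable pages (e.g. NVDIMM/DAX) keep a cached mapping. pat_pfn_immune_to_uc_mtrr(), declared in asm/pat.h above but implemented outside this excerpt, presumably answers whether the PFN's host memory type is already UC, UC- or WC; a sketch under that assumption, using a lookup_memtype()-style query:

	bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
	{
		enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));

		/* UC/UC-/WC mappings keep their type regardless of
		 * MTRRs, so treating such pages as MMIO is safe */
		return cm == _PAGE_CACHE_MODE_UC ||
		       cm == _PAGE_CACHE_MODE_UC_MINUS ||
		       cm == _PAGE_CACHE_MODE_WC;
	}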
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index d22ddbdf5e6e..1272861e77b9 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -19,7 +19,7 @@
#include <linux/ratelimit.h>
-char const *audit_point_name[] = {
+static char const *audit_point_name[] = {
"pre page fault",
"post page fault",
"pre pte write",
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4e3c79530526..b3e488a74828 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -37,6 +37,10 @@
#include <linux/amd-iommu.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
+#include <linux/psp-sev.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
#include <asm/apic.h>
#include <asm/perf_event.h>
@@ -214,6 +218,9 @@ struct vcpu_svm {
*/
struct list_head ir_list;
spinlock_t ir_list_lock;
+
+ /* which host CPU was used for running this vcpu */
+ unsigned int last_cpu;
};
/*
@@ -289,8 +296,12 @@ module_param(vls, int, 0444);
static int vgif = true;
module_param(vgif, int, 0444);
+/* enable/disable SEV support */
+static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
+module_param(sev, int, 0444);
+
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-static void svm_flush_tlb(struct kvm_vcpu *vcpu);
+static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
static void svm_complete_interrupts(struct vcpu_svm *svm);
static int nested_svm_exit_handled(struct vcpu_svm *svm);
@@ -324,6 +335,38 @@ enum {
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
+static unsigned int max_sev_asid;
+static unsigned int min_sev_asid;
+static unsigned long *sev_asid_bitmap;
+#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
+
+struct enc_region {
+ struct list_head list;
+ unsigned long npages;
+ struct page **pages;
+ unsigned long uaddr;
+ unsigned long size;
+};
+
+static inline bool svm_sev_enabled(void)
+{
+ return max_sev_asid;
+}
+
+static inline bool sev_guest(struct kvm *kvm)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+
+ return sev->active;
+}
+
+static inline int sev_get_asid(struct kvm *kvm)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+
+ return sev->asid;
+}
+
static inline void mark_all_dirty(struct vmcb *vmcb)
{
vmcb->control.clean = 0;
@@ -530,10 +573,14 @@ struct svm_cpu_data {
u64 asid_generation;
u32 max_asid;
u32 next_asid;
+ u32 min_asid;
struct kvm_ldttss_desc *tss_desc;
struct page *save_area;
struct vmcb *current_vmcb;
+
+ /* index = sev_asid, value = vmcb pointer */
+ struct vmcb **sev_vmcbs;
};
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -788,6 +835,7 @@ static int svm_hardware_enable(void)
sd->asid_generation = 1;
sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
sd->next_asid = sd->max_asid + 1;
+ sd->min_asid = max_sev_asid + 1;
gdt = get_current_gdt_rw();
sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
@@ -846,6 +894,7 @@ static void svm_cpu_uninit(int cpu)
return;
per_cpu(svm_data, raw_smp_processor_id()) = NULL;
+ kfree(sd->sev_vmcbs);
__free_page(sd->save_area);
kfree(sd);
}
@@ -859,11 +908,18 @@ static int svm_cpu_init(int cpu)
if (!sd)
return -ENOMEM;
sd->cpu = cpu;
- sd->save_area = alloc_page(GFP_KERNEL);
r = -ENOMEM;
+ sd->save_area = alloc_page(GFP_KERNEL);
if (!sd->save_area)
goto err_1;
+ if (svm_sev_enabled()) {
+ r = -ENOMEM;
+ sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *), GFP_KERNEL);
+ if (!sd->sev_vmcbs)
+ goto err_1;
+ }
+
per_cpu(svm_data, cpu) = sd;
return 0;
@@ -1070,6 +1126,48 @@ static int avic_ga_log_notifier(u32 ga_tag)
return 0;
}
+static __init int sev_hardware_setup(void)
+{
+ struct sev_user_data_status *status;
+ int rc;
+
+ /* Maximum number of encrypted guests supported simultaneously */
+ max_sev_asid = cpuid_ecx(0x8000001F);
+
+ if (!max_sev_asid)
+ return 1;
+
+ /* Minimum ASID value that should be used for SEV guest */
+ min_sev_asid = cpuid_edx(0x8000001F);
+
+ /* Initialize SEV ASID bitmap */
+ sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!sev_asid_bitmap)
+ return 1;
+
+ status = kmalloc(sizeof(*status), GFP_KERNEL);
+ if (!status)
+ return 1;
+
+ /*
+ * Check SEV platform status.
+ *
+ * PLATFORM_STATUS can be called in any state; if we fail to query
+ * the PLATFORM status, then either the PSP firmware does not support
+ * the SEV feature or the SEV firmware is dead.
+ */
+ rc = sev_platform_status(status, NULL);
+ if (rc)
+ goto err;
+
+ pr_info("SEV supported\n");
+
+err:
+ kfree(status);
+ return rc;
+}
+
static __init int svm_hardware_setup(void)
{
int cpu;
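sev_hardware_setup() sizes one global bitmap with a bit per SEV ASID, where bit i stands for ASID i+1; __sev_asid_free() further below releases an ASID by clearing its bit and dropping the per-CPU vmcb cache entry. The allocation side is outside this excerpt; a sketch consistent with the min_sev_asid/max_sev_asid bounds established here:

	static int sev_asid_new(void)
	{
		int pos;

		/* SEV guests may only use ASIDs in
		 * [min_sev_asid, max_sev_asid]; bit i covers ASID i + 1. */
		pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid,
					 min_sev_asid - 1);
		if (pos >= max_sev_asid)
			return -EBUSY;

		set_bit(pos, sev_asid_bitmap);
		return pos + 1;
	}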
@@ -1105,6 +1203,17 @@ static __init int svm_hardware_setup(void)
kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
}
+ if (sev) {
+ if (boot_cpu_has(X86_FEATURE_SEV) &&
+ IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
+ r = sev_hardware_setup();
+ if (r)
+ sev = false;
+ } else {
+ sev = false;
+ }
+ }
+
for_each_possible_cpu(cpu) {
r = svm_cpu_init(cpu);
if (r)
@@ -1166,6 +1275,9 @@ static __exit void svm_hardware_unsetup(void)
{
int cpu;
+ if (svm_sev_enabled())
+ kfree(sev_asid_bitmap);
+
for_each_possible_cpu(cpu)
svm_cpu_uninit(cpu);
@@ -1318,7 +1430,7 @@ static void init_vmcb(struct vcpu_svm *svm)
if (npt_enabled) {
/* Setup VMCB for Nested Paging */
- control->nested_ctl = 1;
+ control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
clr_intercept(svm, INTERCEPT_INVLPG);
clr_exception_intercept(svm, PF_VECTOR);
clr_cr_intercept(svm, INTERCEPT_CR3_READ);
@@ -1356,6 +1468,11 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
}
+ if (sev_guest(svm->vcpu.kvm)) {
+ svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
+ clr_exception_intercept(svm, UD_VECTOR);
+ }
+
mark_all_dirty(svm->vmcb);
enable_gif(svm);
@@ -1438,6 +1555,179 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
return 0;
}
+static void __sev_asid_free(int asid)
+{
+ struct svm_cpu_data *sd;
+ int cpu, pos;
+
+ pos = asid - 1;
+ clear_bit(pos, sev_asid_bitmap);
+
+ for_each_possible_cpu(cpu) {
+ sd = per_cpu(svm_data, cpu);
+ sd->sev_vmcbs[pos] = NULL;
+ }
+}
+
+static void sev_asid_free(struct kvm *kvm)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+
+ __sev_asid_free(sev->asid);
+}
+
+static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+{
+ struct sev_data_decommission *decommission;
+ struct sev_data_deactivate *data;
+
+ if (!handle)
+ return;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return;
+
+ /* deactivate handle */
+ data->handle = handle;
+ sev_guest_deactivate(data, NULL);
+
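+ /*
+ * Per the SEV firmware API, guest data must be flushed out of the
+ * caches and the data fabric (WBINVD on all CPUs plus DF_FLUSH)
+ * after DEACTIVATE, before the ASID can safely be reused or the
+ * handle decommissioned.
+ */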
+ wbinvd_on_all_cpus();
+ sev_guest_df_flush(NULL);
+ kfree(data);
+
+ decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
+ if (!decommission)
+ return;
+
+ /* decommission handle */
+ decommission->handle = handle;
+ sev_guest_decommission(decommission, NULL);
+
+ kfree(decommission);
+}
+
+static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
+ unsigned long ulen, unsigned long *n,
+ int write)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ unsigned long npages, npinned, size;
+ unsigned long locked, lock_limit;
+ struct page **pages;
+ int first, last;
+
+ /* Calculate number of pages. */
+ first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
+ last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
+ npages = (last - first + 1);
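+ /*
+ * Worked example (hypothetical values): with 4KiB pages, uaddr =
+ * 0x1ff8 and ulen = 16 give first = 1 and last = 2, so npages = 2;
+ * the range straddles a page boundary despite being only 16 bytes.
+ */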
+
+ locked = sev->pages_locked + npages;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+ pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
+ return NULL;
+ }
+
+ /* Avoid using vmalloc for smaller buffers. */
+ size = npages * sizeof(struct page *);
+ if (size > PAGE_SIZE)
+ pages = vmalloc(size);
+ else
+ pages = kmalloc(size, GFP_KERNEL);
+
+ if (!pages)
+ return NULL;
+
+ /* Pin the user virtual address. */
+ npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
+ if (npinned != npages) {
+ pr_err("SEV: Failure locking %lu pages.\n", npages);
+ goto err;
+ }
+
+ *n = npages;
+ sev->pages_locked = locked;
+
+ return pages;
+
+err:
+ if (npinned > 0)
+ release_pages(pages, npinned);
+
+ kvfree(pages);
+ return NULL;
+}
+
+static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
+ unsigned long npages)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+
+ release_pages(pages, npages);
+ kvfree(pages);
+ sev->pages_locked -= npages;
+}
+
+static void sev_clflush_pages(struct page *pages[], unsigned long npages)
+{
+ uint8_t *page_virtual;
+ unsigned long i;
+
+ if (npages == 0 || pages == NULL)
+ return;
+
+ for (i = 0; i < npages; i++) {
+ page_virtual = kmap_atomic(pages[i]);
+ clflush_cache_range(page_virtual, PAGE_SIZE);
+ kunmap_atomic(page_virtual);
+ }
+}
+
+static void __unregister_enc_region_locked(struct kvm *kvm,
+ struct enc_region *region)
+{
+ /*
+ * The guest may change the memory encryption attribute from C=0 -> C=1
+ * or vice versa for this memory range. Let's make sure caches are
+ * flushed to ensure that guest data gets written into memory with the
+ * correct C-bit.
+ */
+ sev_clflush_pages(region->pages, region->npages);
+
+ sev_unpin_memory(kvm, region->pages, region->npages);
+ list_del(&region->list);
+ kfree(region);
+}
+
+static void sev_vm_destroy(struct kvm *kvm)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct list_head *head = &sev->regions_list;
+ struct list_head *pos, *q;
+
+ if (!sev_guest(kvm))
+ return;
+
+ mutex_lock(&kvm->lock);
+
+ /*
+ * If userspace was terminated before unregistering the memory regions,
+ * then let's unpin all the registered memory.
+ */
+ if (!list_empty(head)) {
+ list_for_each_safe(pos, q, head) {
+ __unregister_enc_region_locked(kvm,
+ list_entry(pos, struct enc_region, list));
+ }
+ }
+
+ mutex_unlock(&kvm->lock);
+
+ sev_unbind_asid(kvm, sev->handle);
+ sev_asid_free(kvm);
+}
+
static void avic_vm_destroy(struct kvm *kvm)
{
unsigned long flags;
@@ -1456,6 +1746,12 @@ static void avic_vm_destroy(struct kvm *kvm)
spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
}
+static void svm_vm_destroy(struct kvm *kvm)
+{
+ avic_vm_destroy(kvm);
+ sev_vm_destroy(kvm);
+}
+
static int avic_vm_init(struct kvm *kvm)
{
unsigned long flags;
@@ -2066,7 +2362,7 @@ static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
return 1;
if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
- svm_flush_tlb(vcpu);
+ svm_flush_tlb(vcpu, true);
vcpu->arch.cr4 = cr4;
if (!npt_enabled)
@@ -2125,7 +2421,7 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
if (sd->next_asid > sd->max_asid) {
++sd->asid_generation;
- sd->next_asid = 1;
+ sd->next_asid = sd->min_asid;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
}
@@ -2173,22 +2469,24 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
static int pf_interception(struct vcpu_svm *svm)
{
- u64 fault_address = svm->vmcb->control.exit_info_2;
+ u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
u64 error_code = svm->vmcb->control.exit_info_1;
return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
- svm->vmcb->control.insn_bytes,
+ static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
+ svm->vmcb->control.insn_bytes : NULL,
svm->vmcb->control.insn_len);
}
static int npf_interception(struct vcpu_svm *svm)
{
- u64 fault_address = svm->vmcb->control.exit_info_2;
+ u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
u64 error_code = svm->vmcb->control.exit_info_1;
trace_kvm_page_fault(fault_address, error_code);
return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
- svm->vmcb->control.insn_bytes,
+ static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
+ svm->vmcb->control.insn_bytes : NULL,
svm->vmcb->control.insn_len);
}
@@ -2415,7 +2713,7 @@ static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
svm->vmcb->control.nested_cr3 = __sme_set(root);
mark_dirty(svm->vmcb, VMCB_NPT);
- svm_flush_tlb(vcpu);
+ svm_flush_tlb(vcpu, true);
}
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
@@ -2957,7 +3255,8 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
if (vmcb->control.asid == 0)
return false;
- if (vmcb->control.nested_ctl && !npt_enabled)
+ if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
+ !npt_enabled)
return false;
return true;
@@ -2971,7 +3270,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
else
svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
- if (nested_vmcb->control.nested_ctl) {
+ if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
kvm_mmu_unload(&svm->vcpu);
svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
nested_svm_init_mmu_context(&svm->vcpu);
@@ -3019,7 +3318,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
svm->nested.intercept = nested_vmcb->control.intercept;
- svm_flush_tlb(&svm->vcpu);
+ svm_flush_tlb(&svm->vcpu, true);
svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
svm->vcpu.arch.hflags |= HF_VINTR_MASK;
@@ -4442,12 +4741,39 @@ static void reload_tss(struct kvm_vcpu *vcpu)
load_TR_desc();
}
+static void pre_sev_run(struct vcpu_svm *svm, int cpu)
+{
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+ int asid = sev_get_asid(svm->vcpu.kvm);
+
+ /* Assign the ASID allocated to this SEV guest */
+ svm->vmcb->control.asid = asid;
+
+ /*
+ * Flush the guest TLB:
+ *
+ * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
+ * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
+ */
+ if (sd->sev_vmcbs[asid] == svm->vmcb &&
+ svm->last_cpu == cpu)
+ return;
+
+ svm->last_cpu = cpu;
+ sd->sev_vmcbs[asid] = svm->vmcb;
+ svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+ mark_dirty(svm->vmcb, VMCB_ASID);
+}
+
static void pre_svm_run(struct vcpu_svm *svm)
{
int cpu = raw_smp_processor_id();
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+ if (sev_guest(svm->vcpu.kvm))
+ return pre_sev_run(svm, cpu);
+
/* FIXME: handle wraparound of asid_generation */
if (svm->asid_generation != sd->asid_generation)
new_asid(svm, sd);
@@ -4865,7 +5191,7 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
return 0;
}
-static void svm_flush_tlb(struct kvm_vcpu *vcpu)
+static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -5208,7 +5534,7 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
svm->vmcb->save.cr3 = __sme_set(root);
mark_dirty(svm->vmcb, VMCB_CR);
- svm_flush_tlb(vcpu);
+ svm_flush_tlb(vcpu, true);
}
static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -5222,7 +5548,7 @@ static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
mark_dirty(svm->vmcb, VMCB_CR);
- svm_flush_tlb(vcpu);
+ svm_flush_tlb(vcpu, true);
}
static int is_disabled(void)
@@ -5308,6 +5634,12 @@ static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
entry->edx |= SVM_FEATURE_NPT;
break;
+ case 0x8000001F:
+ /* Support memory encryption cpuid if host supports it */
+ if (boot_cpu_has(X86_FEATURE_SEV))
+ cpuid(0x8000001f, &entry->eax, &entry->ebx,
+ &entry->ecx, &entry->edx);
+
}
}
@@ -5336,6 +5668,11 @@ static bool svm_xsaves_supported(void)
return false;
}
+static bool svm_umip_emulated(void)
+{
+ return false;
+}
+
static bool svm_has_wbinvd_exit(void)
{
return true;
@@ -5637,6 +5974,828 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
return 0;
}
+static int sev_asid_new(void)
+{
+ int pos;
+
+ /*
+ * An SEV-enabled guest must use an ASID from min_sev_asid to
+ * max_sev_asid.
+ */
+ pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
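+ /*
+ * Bit N of the bitmap tracks ASID N + 1, hence the min_sev_asid - 1
+ * start offset here and the pos + 1 on return below.
+ */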
+ if (pos >= max_sev_asid)
+ return -EBUSY;
+
+ set_bit(pos, sev_asid_bitmap);
+ return pos + 1;
+}
+
+static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ int asid, ret;
+
+ ret = -EBUSY;
+ asid = sev_asid_new();
+ if (asid < 0)
+ return ret;
+
+ ret = sev_platform_init(&argp->error);
+ if (ret)
+ goto e_free;
+
+ sev->active = true;
+ sev->asid = asid;
+ INIT_LIST_HEAD(&sev->regions_list);
+
+ return 0;
+
+e_free:
+ __sev_asid_free(asid);
+ return ret;
+}
+
+static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
+{
+ struct sev_data_activate *data;
+ int asid = sev_get_asid(kvm);
+ int ret;
+
+ wbinvd_on_all_cpus();
+
+ ret = sev_guest_df_flush(error);
+ if (ret)
+ return ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* activate ASID on the given handle */
+ data->handle = handle;
+ data->asid = asid;
+ ret = sev_guest_activate(data, error);
+ kfree(data);
+
+ return ret;
+}
+
+static int __sev_issue_cmd(int fd, int id, void *data, int *error)
+{
+ struct fd f;
+ int ret;
+
+ f = fdget(fd);
+ if (!f.file)
+ return -EBADF;
+
+ ret = sev_issue_cmd_external_user(f.file, id, data, error);
+
+ fdput(f);
+ return ret;
+}
+
+static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+
+ return __sev_issue_cmd(sev->fd, id, data, error);
+}
+
+static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct sev_data_launch_start *start;
+ struct kvm_sev_launch_start params;
+ void *dh_blob, *session_blob;
+ int *error = &argp->error;
+ int ret;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+ return -EFAULT;
+
+ start = kzalloc(sizeof(*start), GFP_KERNEL);
+ if (!start)
+ return -ENOMEM;
+
+ dh_blob = NULL;
+ if (params.dh_uaddr) {
+ dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
+ if (IS_ERR(dh_blob)) {
+ ret = PTR_ERR(dh_blob);
+ goto e_free;
+ }
+
+ start->dh_cert_address = __sme_set(__pa(dh_blob));
+ start->dh_cert_len = params.dh_len;
+ }
+
+ session_blob = NULL;
+ if (params.session_uaddr) {
+ session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
+ if (IS_ERR(session_blob)) {
+ ret = PTR_ERR(session_blob);
+ goto e_free_dh;
+ }
+
+ start->session_address = __sme_set(__pa(session_blob));
+ start->session_len = params.session_len;
+ }
+
+ start->handle = params.handle;
+ start->policy = params.policy;
+
+ /* create memory encryption context */
+ ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
+ if (ret)
+ goto e_free_session;
+
+ /* Bind ASID to this guest */
+ ret = sev_bind_asid(kvm, start->handle, error);
+ if (ret)
+ goto e_free_session;
+
+ /* return handle to userspace */
+ params.handle = start->handle;
+ if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
+ sev_unbind_asid(kvm, start->handle);
+ ret = -EFAULT;
+ goto e_free_session;
+ }
+
+ sev->handle = start->handle;
+ sev->fd = argp->sev_fd;
+
+e_free_session:
+ kfree(session_blob);
+e_free_dh:
+ kfree(dh_blob);
+e_free:
+ kfree(start);
+ return ret;
+}
+
+static int get_num_contig_pages(int idx, struct page **inpages,
+ unsigned long npages)
+{
+ unsigned long paddr, next_paddr;
+ int i = idx + 1, pages = 1;
+
+ /* find the number of contiguous pages starting from idx */
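+ /*
+ * E.g. (hypothetical values): if inpages[idx..] map to physical
+ * pages at 0x1000, 0x2000, 0x5000, the first two are adjacent and
+ * this returns 2.
+ */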
+ paddr = __sme_page_pa(inpages[idx]);
+ while (i < npages) {
+ next_paddr = __sme_page_pa(inpages[i++]);
+ if ((paddr + PAGE_SIZE) == next_paddr) {
+ pages++;
+ paddr = next_paddr;
+ continue;
+ }
+ break;
+ }
+
+ return pages;
+}
+
+static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct kvm_sev_launch_update_data params;
+ struct sev_data_launch_update_data *data;
+ struct page **inpages;
+ int i, ret = 0, pages;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ vaddr = params.uaddr;
+ size = params.len;
+ vaddr_end = vaddr + size;
+
+ /* Lock the user memory. */
+ inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
+ if (!inpages) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ /*
+ * The LAUNCH_UPDATE command will perform in-place encryption of the
+ * memory content (i.e., it will write the same memory region with C=1).
+ * It's possible that the cache may contain the data with C=0, i.e.,
+ * unencrypted, so invalidate it first.
+ */
+ sev_clflush_pages(inpages, npages);
+
+ for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
+ int offset, len;
+
+ /*
+ * If the user buffer is not page-aligned, calculate the offset
+ * within the page.
+ */
+ offset = vaddr & (PAGE_SIZE - 1);
+
+ /* Calculate the number of pages that can be encrypted in one go. */
+ pages = get_num_contig_pages(i, inpages, npages);
+
+ len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
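+ /*
+ * E.g. (hypothetical values): vaddr = 0x1800 and size = 0x2000 over
+ * two contiguous pages yield offset = 0x800 and len = 0x1800, so the
+ * next iteration starts page-aligned at vaddr + 0x1800.
+ */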
+
+ data->handle = sev->handle;
+ data->len = len;
+ data->address = __sme_page_pa(inpages[i]) + offset;
+ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
+ if (ret)
+ goto e_unpin;
+
+ size -= len;
+ next_vaddr = vaddr + len;
+ }
+
+e_unpin:
+ /* the memory contents were updated; mark the pages dirty */
+ for (i = 0; i < npages; i++) {
+ set_page_dirty_lock(inpages[i]);
+ mark_page_accessed(inpages[i]);
+ }
+ /* unlock the user pages */
+ sev_unpin_memory(kvm, inpages, npages);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct sev_data_launch_measure *data;
+ struct kvm_sev_launch_measure params;
+ void *blob = NULL;
+ int ret;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* User wants to query the blob length */
+ if (!params.len)
+ goto cmd;
+
+ if (params.uaddr) {
+ if (params.len > SEV_FW_BLOB_MAX_SIZE) {
+ ret = -EINVAL;
+ goto e_free;
+ }
+
+ if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ ret = -ENOMEM;
+ blob = kmalloc(params.len, GFP_KERNEL);
+ if (!blob)
+ goto e_free;
+
+ data->address = __psp_pa(blob);
+ data->len = params.len;
+ }
+
+cmd:
+ data->handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
+
+ /*
+ * If we were only querying the measurement blob length, the FW has
+ * filled in the expected length, so skip the blob copy and return it.
+ */
+ if (!params.len)
+ goto done;
+
+ if (ret)
+ goto e_free_blob;
+
+ if (blob) {
+ if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len))
+ ret = -EFAULT;
+ }
+
+done:
+ params.len = data->len;
+ if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
+ ret = -EFAULT;
+e_free_blob:
+ kfree(blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct sev_data_launch_finish *data;
+ int ret;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
+
+ kfree(data);
+ return ret;
+}
+
+static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct kvm_sev_guest_status params;
+ struct sev_data_guest_status *data;
+ int ret;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
+ if (ret)
+ goto e_free;
+
+ params.policy = data->policy;
+ params.state = data->state;
+ params.handle = data->handle;
+
+ if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
+ ret = -EFAULT;
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
+ unsigned long dst, int size,
+ int *error, bool enc)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct sev_data_dbg *data;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->handle = sev->handle;
+ data->dst_addr = dst;
+ data->src_addr = src;
+ data->len = size;
+
+ ret = sev_issue_cmd(kvm,
+ enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
+ data, error);
+ kfree(data);
+ return ret;
+}
+
+static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
+ unsigned long dst_paddr, int sz, int *err)
+{
+ int offset;
+
+ /*
+ * It's safe to read more than we were asked for; the caller should
+ * ensure that the destination has enough space.
+ */
+ src_paddr = round_down(src_paddr, 16);
+ offset = src_paddr & 15;
+ sz = round_up(sz + offset, 16);
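+ /*
+ * Worked example (hypothetical values): src_paddr = 0x1009, sz = 20.
+ * round_down() gives 0x1000 with offset = 9, and round_up(20 + 9, 16)
+ * = 32, so the firmware decrypts the two aligned 16-byte blocks that
+ * cover the requested range.
+ */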
+
+ return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
+}
+
+static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
+ unsigned long __user dst_uaddr,
+ unsigned long dst_paddr,
+ int size, int *err)
+{
+ struct page *tpage = NULL;
+ int ret, offset;
+
+ /* If the inputs are not 16-byte aligned, use an intermediate buffer. */
+ if (!IS_ALIGNED(dst_paddr, 16) ||
+ !IS_ALIGNED(paddr, 16) ||
+ !IS_ALIGNED(size, 16)) {
+ tpage = (void *)alloc_page(GFP_KERNEL);
+ if (!tpage)
+ return -ENOMEM;
+
+ dst_paddr = __sme_page_pa(tpage);
+ }
+
+ ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
+ if (ret)
+ goto e_free;
+
+ if (tpage) {
+ offset = paddr & 15;
+ if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
+ page_address(tpage) + offset, size))
+ ret = -EFAULT;
+ }
+
+e_free:
+ if (tpage)
+ __free_page(tpage);
+
+ return ret;
+}
+
+static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
+ unsigned long __user vaddr,
+ unsigned long dst_paddr,
+ unsigned long __user dst_vaddr,
+ int size, int *error)
+{
+ struct page *src_tpage = NULL;
+ struct page *dst_tpage = NULL;
+ int ret, len = size;
+
+ /* If the source buffer is not aligned, use an intermediate buffer */
+ if (!IS_ALIGNED(vaddr, 16)) {
+ src_tpage = alloc_page(GFP_KERNEL);
+ if (!src_tpage)
+ return -ENOMEM;
+
+ if (copy_from_user(page_address(src_tpage),
+ (void __user *)(uintptr_t)vaddr, size)) {
+ __free_page(src_tpage);
+ return -EFAULT;
+ }
+
+ paddr = __sme_page_pa(src_tpage);
+ }
+
+ /*
+ * If the destination buffer or the length is not aligned, do a
+ * read-modify-write:
+ * - decrypt the destination into an intermediate buffer
+ * - copy the source buffer into the intermediate buffer
+ * - use the intermediate buffer as the source buffer
+ */
+ if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
+ int dst_offset;
+
+ dst_tpage = alloc_page(GFP_KERNEL);
+ if (!dst_tpage) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ ret = __sev_dbg_decrypt(kvm, dst_paddr,
+ __sme_page_pa(dst_tpage), size, error);
+ if (ret)
+ goto e_free;
+
+ /*
+ * If the source is a kernel buffer, use memcpy(); otherwise use
+ * copy_from_user().
+ */
+ dst_offset = dst_paddr & 15;
+
+ if (src_tpage)
+ memcpy(page_address(dst_tpage) + dst_offset,
+ page_address(src_tpage), size);
+ else {
+ if (copy_from_user(page_address(dst_tpage) + dst_offset,
+ (void __user *)(uintptr_t)vaddr, size)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+ }
+
+ paddr = __sme_page_pa(dst_tpage);
+ dst_paddr = round_down(dst_paddr, 16);
+ len = round_up(size, 16);
+ }
+
+ ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
+
+e_free:
+ if (src_tpage)
+ __free_page(src_tpage);
+ if (dst_tpage)
+ __free_page(dst_tpage);
+ return ret;
+}
+
+static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+{
+ unsigned long vaddr, vaddr_end, next_vaddr;
+ unsigned long dst_vaddr, dst_vaddr_end;
+ struct page **src_p, **dst_p;
+ struct kvm_sev_dbg debug;
+ unsigned long n;
+ int ret = 0, size;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
+ return -EFAULT;
+
+ vaddr = debug.src_uaddr;
+ size = debug.len;
+ vaddr_end = vaddr + size;
+ dst_vaddr = debug.dst_uaddr;
+ dst_vaddr_end = dst_vaddr + size;
+
+ for (; vaddr < vaddr_end; vaddr = next_vaddr) {
+ int len, s_off, d_off;
+
+ /* lock userspace source and destination page */
+ src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
+ if (!src_p)
+ return -EFAULT;
+
+ dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
+ if (!dst_p) {
+ sev_unpin_memory(kvm, src_p, n);
+ return -EFAULT;
+ }
+
+ /*
+ * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
+ * memory content (i.e., it will write the same memory region with C=1).
+ * It's possible that the cache may contain the data with C=0, i.e.,
+ * unencrypted, so invalidate it first.
+ */
+ sev_clflush_pages(src_p, 1);
+ sev_clflush_pages(dst_p, 1);
+
+ /*
+ * Since the user buffers may not be page-aligned, calculate the
+ * offsets within the page.
+ */
+ s_off = vaddr & ~PAGE_MASK;
+ d_off = dst_vaddr & ~PAGE_MASK;
+ len = min_t(size_t, (PAGE_SIZE - s_off), size);
+
+ if (dec)
+ ret = __sev_dbg_decrypt_user(kvm,
+ __sme_page_pa(src_p[0]) + s_off,
+ dst_vaddr,
+ __sme_page_pa(dst_p[0]) + d_off,
+ len, &argp->error);
+ else
+ ret = __sev_dbg_encrypt_user(kvm,
+ __sme_page_pa(src_p[0]) + s_off,
+ vaddr,
+ __sme_page_pa(dst_p[0]) + d_off,
+ dst_vaddr,
+ len, &argp->error);
+
+ sev_unpin_memory(kvm, src_p, 1);
+ sev_unpin_memory(kvm, dst_p, 1);
+
+ if (ret)
+ goto err;
+
+ next_vaddr = vaddr + len;
+ dst_vaddr = dst_vaddr + len;
+ size -= len;
+ }
+err:
+ return ret;
+}
+
+static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct sev_data_launch_secret *data;
+ struct kvm_sev_launch_secret params;
+ struct page **pages;
+ void *blob, *hdr;
+ unsigned long n;
+ int ret;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
+ return -EFAULT;
+
+ pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
+ if (!pages)
+ return -ENOMEM;
+
+ /*
+ * The secret must be copied into a physically contiguous memory
+ * region, so verify that the userspace memory pages are contiguous
+ * before we issue the command.
+ */
+ if (get_num_contig_pages(0, pages, n) != n) {
+ ret = -EINVAL;
+ goto e_unpin_memory;
+ }
+
+ ret = -ENOMEM;
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ goto e_unpin_memory;
+
+ blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+ goto e_free;
+ }
+
+ data->trans_address = __psp_pa(blob);
+ data->trans_len = params.trans_len;
+
+ hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
+ if (IS_ERR(hdr)) {
+ ret = PTR_ERR(hdr);
+ goto e_free_blob;
+ }
+ data->hdr_address = __psp_pa(hdr);
+ data->hdr_len = params.hdr_len;
+
+ data->handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
+
+ kfree(hdr);
+
+e_free_blob:
+ kfree(blob);
+e_free:
+ kfree(data);
+e_unpin_memory:
+ sev_unpin_memory(kvm, pages, n);
+ return ret;
+}
+
+static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+{
+ struct kvm_sev_cmd sev_cmd;
+ int r;
+
+ if (!svm_sev_enabled())
+ return -ENOTTY;
+
+ if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
+ return -EFAULT;
+
+ mutex_lock(&kvm->lock);
+
+ switch (sev_cmd.id) {
+ case KVM_SEV_INIT:
+ r = sev_guest_init(kvm, &sev_cmd);
+ break;
+ case KVM_SEV_LAUNCH_START:
+ r = sev_launch_start(kvm, &sev_cmd);
+ break;
+ case KVM_SEV_LAUNCH_UPDATE_DATA:
+ r = sev_launch_update_data(kvm, &sev_cmd);
+ break;
+ case KVM_SEV_LAUNCH_MEASURE:
+ r = sev_launch_measure(kvm, &sev_cmd);
+ break;
+ case KVM_SEV_LAUNCH_FINISH:
+ r = sev_launch_finish(kvm, &sev_cmd);
+ break;
+ case KVM_SEV_GUEST_STATUS:
+ r = sev_guest_status(kvm, &sev_cmd);
+ break;
+ case KVM_SEV_DBG_DECRYPT:
+ r = sev_dbg_crypt(kvm, &sev_cmd, true);
+ break;
+ case KVM_SEV_DBG_ENCRYPT:
+ r = sev_dbg_crypt(kvm, &sev_cmd, false);
+ break;
+ case KVM_SEV_LAUNCH_SECRET:
+ r = sev_launch_secret(kvm, &sev_cmd);
+ break;
+ default:
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
+ r = -EFAULT;
+
+out:
+ mutex_unlock(&kvm->lock);
+ return r;
+}
+
+static int svm_register_enc_region(struct kvm *kvm,
+ struct kvm_enc_region *range)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct enc_region *region;
+ int ret = 0;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
+ if (!region->pages) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ /*
+ * The guest may change the memory encryption attribute from C=0 -> C=1
+ * or vice versa for this memory range. Let's make sure caches are
+ * flushed to ensure that guest data gets written into memory with the
+ * correct C-bit.
+ */
+ sev_clflush_pages(region->pages, region->npages);
+
+ region->uaddr = range->addr;
+ region->size = range->size;
+
+ mutex_lock(&kvm->lock);
+ list_add_tail(&region->list, &sev->regions_list);
+ mutex_unlock(&kvm->lock);
+
+ return ret;
+
+e_free:
+ kfree(region);
+ return ret;
+}
+
+static struct enc_region *
+find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
+{
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct list_head *head = &sev->regions_list;
+ struct enc_region *i;
+
+ list_for_each_entry(i, head, list) {
+ if (i->uaddr == range->addr &&
+ i->size == range->size)
+ return i;
+ }
+
+ return NULL;
+}
+
+
+static int svm_unregister_enc_region(struct kvm *kvm,
+ struct kvm_enc_region *range)
+{
+ struct enc_region *region;
+ int ret;
+
+ mutex_lock(&kvm->lock);
+
+ if (!sev_guest(kvm)) {
+ ret = -ENOTTY;
+ goto failed;
+ }
+
+ region = find_enc_region(kvm, range);
+ if (!region) {
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ __unregister_enc_region_locked(kvm, region);
+
+ mutex_unlock(&kvm->lock);
+ return 0;
+
+failed:
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -5653,7 +6812,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.vcpu_reset = svm_vcpu_reset,
.vm_init = avic_vm_init,
- .vm_destroy = avic_vm_destroy,
+ .vm_destroy = svm_vm_destroy,
.prepare_guest_switch = svm_prepare_guest_switch,
.vcpu_load = svm_vcpu_load,
@@ -5713,6 +6872,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.load_eoi_exitmap = svm_load_eoi_exitmap,
.hwapic_irr_update = svm_hwapic_irr_update,
.hwapic_isr_update = svm_hwapic_isr_update,
+ .sync_pir_to_irr = kvm_lapic_find_highest_irr,
.apicv_post_state_restore = avic_post_state_restore,
.set_tss_addr = svm_set_tss_addr,
@@ -5729,6 +6889,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.invpcid_supported = svm_invpcid_supported,
.mpx_supported = svm_mpx_supported,
.xsaves_supported = svm_xsaves_supported,
+ .umip_emulated = svm_umip_emulated,
.set_supported_cpuid = svm_set_supported_cpuid,
@@ -5752,6 +6913,10 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.pre_enter_smm = svm_pre_enter_smm,
.pre_leave_smm = svm_pre_leave_smm,
.enable_smi_window = enable_smi_window,
+
+ .mem_enc_op = svm_mem_enc_op,
+ .mem_enc_reg_region = svm_register_enc_region,
+ .mem_enc_unreg_region = svm_unregister_enc_region,
};
static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bee4c49f6dd0..f427723dc7db 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -419,6 +419,12 @@ struct __packed vmcs12 {
#define VMCS12_SIZE 0x1000
/*
+ * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
+ * supported VMCS12 field encoding.
+ */
+#define VMCS12_MAX_FIELD_INDEX 0x17
+
+/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
*/
@@ -441,6 +447,7 @@ struct nested_vmx {
* data hold by vmcs12
*/
bool sync_shadow_vmcs;
+ bool dirty_vmcs12;
bool change_vmcs01_virtual_x2apic_mode;
/* L2 must run next, and mustn't decide to exit to L1. */
@@ -664,6 +671,8 @@ struct vcpu_vmx {
u32 host_pkru;
+ unsigned long host_debugctlmsr;
+
/*
* Only bits masked by msr_ia32_feature_control_valid_bits can be set in
* msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
@@ -692,67 +701,24 @@ static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
return &(to_vmx(vcpu)->pi_desc);
}
+#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
-#define FIELD(number, name) [number] = VMCS12_OFFSET(name)
-#define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \
- [number##_HIGH] = VMCS12_OFFSET(name)+4
+#define FIELD(number, name) [ROL16(number, 6)] = VMCS12_OFFSET(name)
+#define FIELD64(number, name) \
+ FIELD(number, name), \
+ [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
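+
+/*
+ * A 64-bit field occupies two table slots: the base encoding and the
+ * matching _HIGH (odd) encoding that exposes its upper 32 bits.
+ */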
-static unsigned long shadow_read_only_fields[] = {
- /*
- * We do NOT shadow fields that are modified when L0
- * traps and emulates any vmx instruction (e.g. VMPTRLD,
- * VMXON...) executed by L1.
- * For example, VM_INSTRUCTION_ERROR is read
- * by L1 if a vmx instruction fails (part of the error path).
- * Note the code assumes this logic. If for some reason
- * we start shadowing these fields then we need to
- * force a shadow sync when L0 emulates vmx instructions
- * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
- * by nested_vmx_failValid)
- */
- VM_EXIT_REASON,
- VM_EXIT_INTR_INFO,
- VM_EXIT_INSTRUCTION_LEN,
- IDT_VECTORING_INFO_FIELD,
- IDT_VECTORING_ERROR_CODE,
- VM_EXIT_INTR_ERROR_CODE,
- EXIT_QUALIFICATION,
- GUEST_LINEAR_ADDRESS,
- GUEST_PHYSICAL_ADDRESS
+static u16 shadow_read_only_fields[] = {
+#define SHADOW_FIELD_RO(x) x,
+#include "vmx_shadow_fields.h"
};
static int max_shadow_read_only_fields =
ARRAY_SIZE(shadow_read_only_fields);
-static unsigned long shadow_read_write_fields[] = {
- TPR_THRESHOLD,
- GUEST_RIP,
- GUEST_RSP,
- GUEST_CR0,
- GUEST_CR3,
- GUEST_CR4,
- GUEST_INTERRUPTIBILITY_INFO,
- GUEST_RFLAGS,
- GUEST_CS_SELECTOR,
- GUEST_CS_AR_BYTES,
- GUEST_CS_LIMIT,
- GUEST_CS_BASE,
- GUEST_ES_BASE,
- GUEST_BNDCFGS,
- CR0_GUEST_HOST_MASK,
- CR0_READ_SHADOW,
- CR4_READ_SHADOW,
- TSC_OFFSET,
- EXCEPTION_BITMAP,
- CPU_BASED_VM_EXEC_CONTROL,
- VM_ENTRY_EXCEPTION_ERROR_CODE,
- VM_ENTRY_INTR_INFO_FIELD,
- VM_ENTRY_INSTRUCTION_LEN,
- VM_ENTRY_EXCEPTION_ERROR_CODE,
- HOST_FS_BASE,
- HOST_GS_BASE,
- HOST_FS_SELECTOR,
- HOST_GS_SELECTOR
+static u16 shadow_read_write_fields[] = {
+#define SHADOW_FIELD_RW(x) x,
+#include "vmx_shadow_fields.h"
};
static int max_shadow_read_write_fields =
ARRAY_SIZE(shadow_read_write_fields);
@@ -905,13 +871,17 @@ static inline short vmcs_field_to_offset(unsigned long field)
{
const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
unsigned short offset;
+ unsigned index;
+
+ if (field >> 15)
+ return -ENOENT;
- BUILD_BUG_ON(size > SHRT_MAX);
- if (field >= size)
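+ /*
+ * The discriminating bits of a VMCS field encoding (width in bits
+ * 14:13, type in bits 11:10) sit at the top; rotating left by 6
+ * moves bits 15:10 into the low bits so the lookup table stays
+ * small and dense.
+ */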
+ index = ROL16(field, 6);
+ if (index >= size)
return -ENOENT;
- field = array_index_nospec(field, size);
- offset = vmcs_field_to_offset_table[field];
+ index = array_index_nospec(index, size);
+ offset = vmcs_field_to_offset_table[index];
if (offset == 0)
return -ENOENT;
return offset;
@@ -957,8 +927,6 @@ static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
enum {
- VMX_IO_BITMAP_A,
- VMX_IO_BITMAP_B,
VMX_VMREAD_BITMAP,
VMX_VMWRITE_BITMAP,
VMX_BITMAP_NR
@@ -966,8 +934,6 @@ enum {
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
-#define vmx_io_bitmap_a (vmx_bitmap[VMX_IO_BITMAP_A])
-#define vmx_io_bitmap_b (vmx_bitmap[VMX_IO_BITMAP_B])
#define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
@@ -2373,6 +2339,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vmx_vcpu_pi_load(vcpu, cpu);
vmx->host_pkru = read_pkru();
+ vmx->host_debugctlmsr = get_debugctlmsr();
}
static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
@@ -2930,7 +2897,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1);
/* highest index: VMX_PREEMPTION_TIMER_VALUE */
- vmx->nested.nested_vmx_vmcs_enum = 0x2e;
+ vmx->nested.nested_vmx_vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
}
/*
@@ -3266,6 +3233,7 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
*/
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
struct shared_msr_entry *msr;
switch (msr_info->index) {
@@ -3277,8 +3245,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vmcs_readl(GUEST_GS_BASE);
break;
case MSR_KERNEL_GS_BASE:
- vmx_load_host_state(to_vmx(vcpu));
- msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
+ vmx_load_host_state(vmx);
+ msr_info->data = vmx->msr_guest_kernel_gs_base;
break;
#endif
case MSR_EFER:
@@ -3318,13 +3286,13 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_MCG_EXT_CTL:
if (!msr_info->host_initiated &&
- !(to_vmx(vcpu)->msr_ia32_feature_control &
+ !(vmx->msr_ia32_feature_control &
FEATURE_CONTROL_LMCE))
return 1;
msr_info->data = vcpu->arch.mcg_ext_ctl;
break;
case MSR_IA32_FEATURE_CONTROL:
- msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
+ msr_info->data = vmx->msr_ia32_feature_control;
break;
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
if (!nested_vmx_allowed(vcpu))
@@ -3341,7 +3309,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
/* Otherwise falls through */
default:
- msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
+ msr = find_msr_entry(vmx, msr_info->index);
if (msr) {
msr_info->data = msr->data;
break;
@@ -3727,7 +3695,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
#endif
CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING |
- CPU_BASED_USE_IO_BITMAPS |
+ CPU_BASED_UNCOND_IO_EXITING |
CPU_BASED_MOV_DR_EXITING |
CPU_BASED_USE_TSC_OFFSETING |
CPU_BASED_INVLPG_EXITING |
@@ -3757,6 +3725,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
SECONDARY_EXEC_ENABLE_EPT |
SECONDARY_EXEC_UNRESTRICTED_GUEST |
SECONDARY_EXEC_PAUSE_LOOP_EXITING |
+ SECONDARY_EXEC_DESC |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
@@ -3982,17 +3951,17 @@ static void free_kvm_area(void)
}
}
-enum vmcs_field_type {
- VMCS_FIELD_TYPE_U16 = 0,
- VMCS_FIELD_TYPE_U64 = 1,
- VMCS_FIELD_TYPE_U32 = 2,
- VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
+enum vmcs_field_width {
+ VMCS_FIELD_WIDTH_U16 = 0,
+ VMCS_FIELD_WIDTH_U64 = 1,
+ VMCS_FIELD_WIDTH_U32 = 2,
+ VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3
};
-static inline int vmcs_field_type(unsigned long field)
+static inline int vmcs_field_width(unsigned long field)
{
if (0x1 & field) /* the *_HIGH fields are all 32 bit */
- return VMCS_FIELD_TYPE_U32;
+ return VMCS_FIELD_WIDTH_U32;
return (field >> 13) & 0x3 ;
}
@@ -4005,43 +3974,66 @@ static void init_vmcs_shadow_fields(void)
{
int i, j;
- /* No checks for read only fields yet */
+ for (i = j = 0; i < max_shadow_read_only_fields; i++) {
+ u16 field = shadow_read_only_fields[i];
+ if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
+ (i + 1 == max_shadow_read_only_fields ||
+ shadow_read_only_fields[i + 1] != field + 1))
+ pr_err("Missing field from shadow_read_only_field %x\n",
+ field + 1);
+
+ clear_bit(field, vmx_vmread_bitmap);
+#ifdef CONFIG_X86_64
+ if (field & 1)
+ continue;
+#endif
+ if (j < i)
+ shadow_read_only_fields[j] = field;
+ j++;
+ }
+ max_shadow_read_only_fields = j;
for (i = j = 0; i < max_shadow_read_write_fields; i++) {
- switch (shadow_read_write_fields[i]) {
- case GUEST_BNDCFGS:
- if (!kvm_mpx_supported())
+ u16 field = shadow_read_write_fields[i];
+ if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
+ (i + 1 == max_shadow_read_write_fields ||
+ shadow_read_write_fields[i + 1] != field + 1))
+ pr_err("Missing field from shadow_read_write_field %x\n",
+ field + 1);
+
+ /*
+ * PML and the preemption timer can be emulated, but the
+ * processor cannot vmwrite to fields that don't exist
+ * on bare metal.
+ */
+ switch (field) {
+ case GUEST_PML_INDEX:
+ if (!cpu_has_vmx_pml())
+ continue;
+ break;
+ case VMX_PREEMPTION_TIMER_VALUE:
+ if (!cpu_has_vmx_preemption_timer())
+ continue;
+ break;
+ case GUEST_INTR_STATUS:
+ if (!cpu_has_vmx_apicv())
continue;
break;
default:
break;
}
+ clear_bit(field, vmx_vmwrite_bitmap);
+ clear_bit(field, vmx_vmread_bitmap);
+#ifdef CONFIG_X86_64
+ if (field & 1)
+ continue;
+#endif
if (j < i)
- shadow_read_write_fields[j] =
- shadow_read_write_fields[i];
+ shadow_read_write_fields[j] = field;
j++;
}
max_shadow_read_write_fields = j;
-
- /* shadowed fields guest access without vmexit */
- for (i = 0; i < max_shadow_read_write_fields; i++) {
- unsigned long field = shadow_read_write_fields[i];
-
- clear_bit(field, vmx_vmwrite_bitmap);
- clear_bit(field, vmx_vmread_bitmap);
- if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64) {
- clear_bit(field + 1, vmx_vmwrite_bitmap);
- clear_bit(field + 1, vmx_vmread_bitmap);
- }
- }
- for (i = 0; i < max_shadow_read_only_fields; i++) {
- unsigned long field = shadow_read_only_fields[i];
-
- clear_bit(field, vmx_vmread_bitmap);
- if (vmcs_field_type(field) == VMCS_FIELD_TYPE_U64)
- clear_bit(field + 1, vmx_vmread_bitmap);
- }
}
static __init int alloc_kvm_area(void)
@@ -4254,9 +4246,10 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
#endif
-static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
+static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
+ bool invalidate_gpa)
{
- if (enable_ept) {
+ if (enable_ept && (invalidate_gpa || !enable_vpid)) {
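+ /*
+ * GPA mappings changed, or there are no VPID tags to rely on:
+ * a full EPT context sync is required.
+ */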
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return;
ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
@@ -4265,15 +4258,15 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
}
}
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
- __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
+ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
}
static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
{
if (enable_ept)
- vmx_flush_tlb(vcpu);
+ vmx_flush_tlb(vcpu, true);
}
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
@@ -4471,7 +4464,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
ept_load_pdptrs(vcpu);
}
- vmx_flush_tlb(vcpu);
+ vmx_flush_tlb(vcpu, true);
vmcs_writel(GUEST_CR3, guest_cr3);
}
@@ -4488,6 +4481,14 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
(to_vmx(vcpu)->rmode.vm86_active ?
KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
+ if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) {
+ vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_DESC);
+ hw_cr4 &= ~X86_CR4_UMIP;
+ } else
+ vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_DESC);
+
if (cr4 & X86_CR4_VMXE) {
/*
* To use VMXON (and later other VMX instructions), a guest
@@ -5119,11 +5120,6 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
{
int f = sizeof(unsigned long);
- if (!cpu_has_vmx_msr_bitmap()) {
- WARN_ON(1);
- return;
- }
-
/*
* See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
* have the write-low and read-high bitmap offsets the wrong way round.
@@ -5263,7 +5259,8 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
if (max_irr != 256) {
vapic_page = kmap(vmx->nested.virtual_apic_page);
- __kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page);
+ __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
+ vapic_page, &max_irr);
kunmap(vmx->nested.virtual_apic_page);
status = vmcs_read16(GUEST_INTR_STATUS);
@@ -5323,14 +5320,15 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
if (is_guest_mode(vcpu) &&
vector == vmx->nested.posted_intr_nv) {
- /* the PIR and ON have been set by L1. */
- kvm_vcpu_trigger_posted_interrupt(vcpu, true);
/*
* If a posted intr is not recognized by hardware,
* we will accomplish it in the next vmentry.
*/
vmx->nested.pi_pending = true;
kvm_make_request(KVM_REQ_EVENT, vcpu);
+ /* the PIR and ON have been set by L1. */
+ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true))
+ kvm_vcpu_kick(vcpu);
return 0;
}
return -1;
@@ -5509,6 +5507,7 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
struct kvm_vcpu *vcpu = &vmx->vcpu;
u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
+
if (!cpu_need_virtualize_apic_accesses(vcpu))
exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
if (vmx->vpid == 0)
@@ -5527,6 +5526,11 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+
+ /*
+ * SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP,
+ * in vmx_set_cr4.
+ */
+ exec_control &= ~SECONDARY_EXEC_DESC;
+
/* SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
(handle_vmptrld).
We can NOT enable shadow_vmcs here because we don't have yet
@@ -5646,10 +5650,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
#endif
int i;
- /* I/O */
- vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
- vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
-
if (enable_shadow_vmcs) {
vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
@@ -6304,6 +6304,12 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
return kvm_set_cr4(vcpu, val);
}
+static int handle_desc(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
+ return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+}
+
static int handle_cr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification, val;
@@ -6760,7 +6766,21 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
if (!is_guest_mode(vcpu) &&
!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
trace_kvm_fast_mmio(gpa);
- return kvm_skip_emulated_instruction(vcpu);
+ /*
+ * Doing kvm_skip_emulated_instruction() depends on undefined
+ * behavior: Intel's manual doesn't mandate
+ * VM_EXIT_INSTRUCTION_LEN to be set in the VMCS when an EPT
+ * MISCONFIG occurs. While it was observed to be set on real
+ * hardware, other hypervisors (namely Hyper-V) don't set it, so
+ * we would end up advancing the IP with some random value.
+ * Disable fast mmio when running nested and keep it for real
+ * hardware in the hope that VM_EXIT_INSTRUCTION_LEN will always
+ * be set correctly.
+ */
+ if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+ return kvm_skip_emulated_instruction(vcpu);
+ else
+ return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
+ NULL, 0) == EMULATE_DONE;
}
ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
@@ -6957,10 +6977,6 @@ static __init int hardware_setup(void)
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
- memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
-
- memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
-
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
goto out;
@@ -6973,11 +6989,6 @@ static __init int hardware_setup(void)
!(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
enable_vpid = 0;
- if (!cpu_has_vmx_shadow_vmcs())
- enable_shadow_vmcs = 0;
- if (enable_shadow_vmcs)
- init_vmcs_shadow_fields();
-
if (!cpu_has_vmx_ept() ||
!cpu_has_vmx_ept_4levels() ||
!cpu_has_vmx_ept_mt_wb() ||
@@ -7063,6 +7074,11 @@ static __init int hardware_setup(void)
kvm_x86_ops->cancel_hv_timer = NULL;
}
+ if (!cpu_has_vmx_shadow_vmcs())
+ enable_shadow_vmcs = 0;
+ if (enable_shadow_vmcs)
+ init_vmcs_shadow_fields();
+
kvm_set_posted_intr_wakeup_handler(wakeup_handler);
kvm_mce_cap_supported |= MCG_LMCE_P;
@@ -7593,17 +7609,17 @@ static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
p = ((char *)(get_vmcs12(vcpu))) + offset;
- switch (vmcs_field_type(field)) {
- case VMCS_FIELD_TYPE_NATURAL_WIDTH:
+ switch (vmcs_field_width(field)) {
+ case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
*ret = *((natural_width *)p);
return 0;
- case VMCS_FIELD_TYPE_U16:
+ case VMCS_FIELD_WIDTH_U16:
*ret = *((u16 *)p);
return 0;
- case VMCS_FIELD_TYPE_U32:
+ case VMCS_FIELD_WIDTH_U32:
*ret = *((u32 *)p);
return 0;
- case VMCS_FIELD_TYPE_U64:
+ case VMCS_FIELD_WIDTH_U64:
*ret = *((u64 *)p);
return 0;
default:
@@ -7620,17 +7636,17 @@ static inline int vmcs12_write_any(struct kvm_vcpu *vcpu,
if (offset < 0)
return offset;
- switch (vmcs_field_type(field)) {
- case VMCS_FIELD_TYPE_U16:
+ switch (vmcs_field_width(field)) {
+ case VMCS_FIELD_WIDTH_U16:
*(u16 *)p = field_value;
return 0;
- case VMCS_FIELD_TYPE_U32:
+ case VMCS_FIELD_WIDTH_U32:
*(u32 *)p = field_value;
return 0;
- case VMCS_FIELD_TYPE_U64:
+ case VMCS_FIELD_WIDTH_U64:
*(u64 *)p = field_value;
return 0;
- case VMCS_FIELD_TYPE_NATURAL_WIDTH:
+ case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
*(natural_width *)p = field_value;
return 0;
default:
@@ -7646,7 +7662,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
unsigned long field;
u64 field_value;
struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
- const unsigned long *fields = shadow_read_write_fields;
+ const u16 *fields = shadow_read_write_fields;
const int num_fields = max_shadow_read_write_fields;
preempt_disable();
@@ -7655,23 +7671,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
for (i = 0; i < num_fields; i++) {
field = fields[i];
- switch (vmcs_field_type(field)) {
- case VMCS_FIELD_TYPE_U16:
- field_value = vmcs_read16(field);
- break;
- case VMCS_FIELD_TYPE_U32:
- field_value = vmcs_read32(field);
- break;
- case VMCS_FIELD_TYPE_U64:
- field_value = vmcs_read64(field);
- break;
- case VMCS_FIELD_TYPE_NATURAL_WIDTH:
- field_value = vmcs_readl(field);
- break;
- default:
- WARN_ON(1);
- continue;
- }
+ field_value = __vmcs_readl(field);
vmcs12_write_any(&vmx->vcpu, field, field_value);
}
@@ -7683,7 +7683,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
- const unsigned long *fields[] = {
+ const u16 *fields[] = {
shadow_read_write_fields,
shadow_read_only_fields
};
@@ -7702,24 +7702,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
for (i = 0; i < max_fields[q]; i++) {
field = fields[q][i];
vmcs12_read_any(&vmx->vcpu, field, &field_value);
-
- switch (vmcs_field_type(field)) {
- case VMCS_FIELD_TYPE_U16:
- vmcs_write16(field, (u16)field_value);
- break;
- case VMCS_FIELD_TYPE_U32:
- vmcs_write32(field, (u32)field_value);
- break;
- case VMCS_FIELD_TYPE_U64:
- vmcs_write64(field, (u64)field_value);
- break;
- case VMCS_FIELD_TYPE_NATURAL_WIDTH:
- vmcs_writel(field, (long)field_value);
- break;
- default:
- WARN_ON(1);
- break;
- }
+ __vmcs_writel(field, field_value);
}
}
@@ -7788,8 +7771,10 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
{
unsigned long field;
gva_t gva;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+
/* The value to write might be 32 or 64 bits, depending on L1's long
* mode, and eventually we need to write that into a field of several
* possible lengths. The code below first zero-extends the value to 64
@@ -7832,6 +7817,20 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}
+ switch (field) {
+#define SHADOW_FIELD_RW(x) case x:
+#include "vmx_shadow_fields.h"
+ /*
+ * The fields that can be updated by L1 without a vmexit are
+ * always updated in the vmcs02; the others go down the slow
+ * path of prepare_vmcs02.
+ */
+ break;
+ default:
+ vmx->nested.dirty_vmcs12 = true;
+ break;
+ }
+
nested_vmx_succeed(vcpu);
return kvm_skip_emulated_instruction(vcpu);
}
@@ -7846,6 +7845,7 @@ static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
__pa(vmx->vmcs01.shadow_vmcs));
vmx->nested.sync_shadow_vmcs = true;
}
+ vmx->nested.dirty_vmcs12 = true;
}
/* Emulate the VMPTRLD instruction */
@@ -8066,7 +8066,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}
- __vmx_flush_tlb(vcpu, vmx->nested.vpid02);
+ __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
nested_vmx_succeed(vcpu);
return kvm_skip_emulated_instruction(vcpu);
@@ -8260,6 +8260,8 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_XSETBV] = handle_xsetbv,
[EXIT_REASON_TASK_SWITCH] = handle_task_switch,
[EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
+ [EXIT_REASON_GDTR_IDTR] = handle_desc,
+ [EXIT_REASON_LDTR_TR] = handle_desc,
[EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
[EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
[EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
@@ -9069,36 +9071,23 @@ static void vmx_set_rvi(int vector)
static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
- if (!is_guest_mode(vcpu)) {
- vmx_set_rvi(max_irr);
- return;
- }
-
- if (max_irr == -1)
- return;
-
- /*
- * In guest mode. If a vmexit is needed, vmx_check_nested_events
- * handles it.
- */
- if (nested_exit_on_intr(vcpu))
- return;
-
/*
- * Else, fall back to pre-APICv interrupt injection since L2
- * is run without virtual interrupt delivery.
+ * When running L2, updating RVI is only relevant when the
+ * vmcs12 virtual-interrupt-delivery control is enabled.
+ * However, it can be enabled only when L1 also
+ * intercepts external interrupts, and in that case
+ * we should not update the vmcs02 RVI but instead
+ * intercept the interrupt. Therefore, do nothing when running L2.
*/
- if (!kvm_event_needs_reinjection(vcpu) &&
- vmx_interrupt_allowed(vcpu)) {
- kvm_queue_interrupt(vcpu, max_irr, false);
- vmx_inject_irq(vcpu);
- }
+ if (!is_guest_mode(vcpu))
+ vmx_set_rvi(max_irr);
}
static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int max_irr;
+ bool max_irr_updated;
WARN_ON(!vcpu->arch.apicv_active);
if (pi_test_on(&vmx->pi_desc)) {
@@ -9108,7 +9097,23 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
* But on x86 this is just a compiler barrier anyway.
*/
smp_mb__after_atomic();
- max_irr = kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
+ max_irr_updated =
+ kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
+
+ /*
+ * If we are running L2 and L1 has a new pending interrupt
+ * which can be injected, we should re-evaluate
+ * what should be done with this new L1 interrupt.
+ * If L1 intercepts external interrupts, we should
+ * exit from L2 to L1. Otherwise, the interrupt should be
+ * delivered directly to L2.
+ */
+ if (is_guest_mode(vcpu) && max_irr_updated) {
+ if (nested_exit_on_intr(vcpu))
+ kvm_vcpu_exiting_guest_mode(vcpu);
+ else
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ }
} else {
max_irr = kvm_lapic_find_highest_irr(vcpu);
}
@@ -9223,6 +9228,12 @@ static bool vmx_xsaves_supported(void)
SECONDARY_EXEC_XSAVES;
}
+static bool vmx_umip_emulated(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_DESC;
+}
+
static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
{
u32 exit_intr_info;
@@ -9378,7 +9389,7 @@ static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long debugctlmsr, cr3, cr4;
+ unsigned long cr3, cr4;
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!enable_vnmi &&
@@ -9431,7 +9442,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
__write_pkru(vcpu->arch.pkru);
atomic_switch_perf_msrs(vmx);
- debugctlmsr = get_debugctlmsr();
vmx_arm_hv_timer(vcpu);
@@ -9587,8 +9597,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmexit_fill_RSB();
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
- if (debugctlmsr)
- update_debugctlmsr(debugctlmsr);
+ if (vmx->host_debugctlmsr)
+ update_debugctlmsr(vmx->host_debugctlmsr);
#ifndef CONFIG_X86_64
/*
@@ -9668,10 +9678,8 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- int r;
- r = vcpu_load(vcpu);
- BUG_ON(r);
+ vcpu_load(vcpu);
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
free_nested(vmx);
vcpu_put(vcpu);
@@ -9871,7 +9879,8 @@ static void vmcs_set_secondary_exec_control(u32 new_ctl)
u32 mask =
SECONDARY_EXEC_SHADOW_VMCS |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_DESC;
u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
@@ -10037,8 +10046,8 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
}
}
-static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12);
+static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12);
static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
@@ -10127,11 +10136,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
(unsigned long)(vmcs12->posted_intr_desc_addr &
(PAGE_SIZE - 1)));
}
- if (cpu_has_vmx_msr_bitmap() &&
- nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS) &&
- nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
- ;
- else
+ if (!nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
CPU_BASED_USE_MSR_BITMAPS);
}
@@ -10199,8 +10204,8 @@ static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
* Merge L0's and L1's MSR bitmap, return false to indicate that
* we do not use the hardware.
*/
-static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
+static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
{
int msr;
struct page *page;
@@ -10222,6 +10227,11 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
+ /* Nothing to do if the MSR bitmap is not in use. */
+ if (!cpu_has_vmx_msr_bitmap() ||
+ !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
+ return false;
+
if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
!pred_cmd && !spec_ctrl)
return false;
@@ -10229,32 +10239,41 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
if (is_error_page(page))
return false;
- msr_bitmap_l1 = (unsigned long *)kmap(page);
- memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
+ msr_bitmap_l1 = (unsigned long *)kmap(page);
+ if (nested_cpu_has_apic_reg_virt(vmcs12)) {
+ /*
+ * L0 need not intercept reads for MSRs between 0x800 and 0x8ff; it
+ * just lets the processor take the value from the virtual-APIC page,
+ * so take those 256 bits directly from the L1 bitmap.
+ */
+ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+ unsigned word = msr / BITS_PER_LONG;
+ msr_bitmap_l0[word] = msr_bitmap_l1[word];
+ msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
+ }
+ } else {
+ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+ unsigned word = msr / BITS_PER_LONG;
+ msr_bitmap_l0[word] = ~0;
+ msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
+ }
+ }
- if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
- if (nested_cpu_has_apic_reg_virt(vmcs12))
- for (msr = 0x800; msr <= 0x8ff; msr++)
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- msr, MSR_TYPE_R);
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ X2APIC_MSR(APIC_TASKPRI),
+ MSR_TYPE_W);
+ if (nested_cpu_has_vid(vmcs12)) {
nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- APIC_BASE_MSR + (APIC_TASKPRI >> 4),
- MSR_TYPE_R | MSR_TYPE_W);
-
- if (nested_cpu_has_vid(vmcs12)) {
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- APIC_BASE_MSR + (APIC_EOI >> 4),
- MSR_TYPE_W);
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
- MSR_TYPE_W);
- }
+ msr_bitmap_l1, msr_bitmap_l0,
+ X2APIC_MSR(APIC_EOI),
+ MSR_TYPE_W);
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ X2APIC_MSR(APIC_SELF_IPI),
+ MSR_TYPE_W);
}
if (spec_ctrl)
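The word arithmetic in the loops above follows the VMX MSR-bitmap layout:
one 4 KiB page in which bytes 0-1023 gate reads of MSRs 0x0-0x1fff and
bytes 2048-3071 gate writes of the same range (a set bit means
"intercept"), so word + 0x800 / sizeof(long) is the write-bitmap slot for
the same MSR. A stand-alone sketch of the read-pass-through case (not
kernel code):

	#include <limits.h>

	#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)

	/* Let x2APIC (0x800-0x8ff) reads follow L1's wishes while all
	 * writes in that range stay intercepted. */
	static void copy_x2apic_read_bits(unsigned long *l0,
					  const unsigned long *l1)
	{
		unsigned int msr;

		for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
			unsigned int word = msr / BITS_PER_LONG;

			l0[word] = l1[word];		/* read-low bitmap */
			l0[word + 0x800 / sizeof(long)] = ~0UL; /* write-low */
		}
	}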
@@ -10534,25 +10553,12 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
return 0;
}
-/*
- * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
- * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
- * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
- * guest in a way that will both be appropriate to L1's requests, and our
- * needs. In addition to modifying the active vmcs (which is vmcs02), this
- * function also has additional necessary side-effects, like setting various
- * vcpu->arch fields.
- * Returns 0 on success, 1 on failure. Invalid state exit qualification code
- * is assigned to entry_failure_code on failure.
- */
-static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
- bool from_vmentry, u32 *entry_failure_code)
+static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ bool from_vmentry)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 exec_control, vmcs12_exec_ctrl;
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
- vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
@@ -10560,7 +10566,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
- vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
@@ -10570,15 +10575,12 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
- vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
- vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
- vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
@@ -10588,6 +10590,125 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
+ vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
+ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
+ vmcs12->guest_pending_dbg_exceptions);
+ vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
+ vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
+
+ if (nested_cpu_has_xsaves(vmcs12))
+ vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
+ vmcs_write64(VMCS_LINK_POINTER, -1ull);
+
+ if (cpu_has_vmx_posted_intr())
+ vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
+
+ /*
+ * Whether page-faults are trapped is determined by a combination of
+ * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
+ * If enable_ept, L0 doesn't care about page faults and we should
+ * set all of these to L1's desires. However, if !enable_ept, L0 does
+ * care about (at least some) page faults, and because it is not easy
+ * (if at all possible?) to merge L0 and L1's desires, we simply ask
+ * to exit on each and every L2 page fault. This is done by setting
+ * MASK=MATCH=0 and (see below) EB.PF=1.
+ * Note that below we don't need special code to set EB.PF beyond the
+ * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
+ * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
+ * !enable_ept, EB.PF is 1, so the "or" will always be 1.
+ */
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
+ enable_ept ? vmcs12->page_fault_error_code_mask : 0);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
+ enable_ept ? vmcs12->page_fault_error_code_match : 0);
+
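The hardware test the comment above describes reduces to a one-line
predicate: a page fault causes an exit iff the result of
"(PFEC & MASK) == MATCH" agrees with EB.PF. With MASK = MATCH = 0 every
error code matches, so EB.PF = 1 (the !enable_ept case) forces an exit on
every L2 page fault. A sketch, not kernel code:

	#include <stdbool.h>
	#include <stdint.h>

	static bool pf_causes_vmexit(uint32_t pfec, uint32_t mask,
				     uint32_t match, bool eb_pf)
	{
		bool matched = (pfec & mask) == match;

		return eb_pf ? matched : !matched;
	}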
+ /* All VMFUNCs are currently emulated through L0 vmexits. */
+ if (cpu_has_vmx_vmfunc())
+ vmcs_write64(VM_FUNCTION_CONTROL, 0);
+
+ if (cpu_has_vmx_apicv()) {
+ vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
+ vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
+ vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
+ vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
+ }
+
+ /*
+ * Set host-state according to L0's settings (vmcs12 is irrelevant here)
+ * Some constant fields are set here by vmx_set_constant_host_state().
+ * Other fields are different per CPU, and will be set later when
+ * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
+ */
+ vmx_set_constant_host_state(vmx);
+
+ /*
+ * Set the MSR load/store lists to match L0's settings.
+ */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+
+ set_cr4_guest_host_mask(vmx);
+
+ if (vmx_mpx_supported())
+ vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+
+ if (enable_vpid) {
+ if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
+ else
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+ }
+
+ /*
+ * L1 may access the L2's PDPTR, so save them to construct vmcs12
+ */
+ if (enable_ept) {
+ vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
+ vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
+ vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
+ vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
+ }
+
+ if (cpu_has_vmx_msr_bitmap())
+ vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
+}
+
+/*
+ * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
+ * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
+ * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
+ * guest in a way that will both be appropriate to L1's requests, and our
+ * needs. In addition to modifying the active vmcs (which is vmcs02), this
+ * function also has additional necessary side-effects, like setting various
+ * vcpu->arch fields.
+ * Returns 0 on success, 1 on failure. Invalid state exit qualification code
+ * is assigned to entry_failure_code on failure.
+ */
+static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ bool from_vmentry, u32 *entry_failure_code)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 exec_control, vmcs12_exec_ctrl;
+
+ /*
+ * First, the fields that are shadowed. This must be kept in sync
+ * with vmx_shadow_fields.h.
+ */
+
+ vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
+ vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
+ vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
+ vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
+ vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
+
+ /*
+ * Not in vmcs02: GUEST_PML_INDEX, HOST_FS_SELECTOR, HOST_GS_SELECTOR,
+ * HOST_FS_BASE, HOST_GS_BASE.
+ */
+
if (from_vmentry &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
@@ -10610,16 +10731,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
} else {
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
}
- vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
vmx_set_rflags(vcpu, vmcs12->guest_rflags);
- vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
- vmcs12->guest_pending_dbg_exceptions);
- vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
- vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
-
- if (nested_cpu_has_xsaves(vmcs12))
- vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
- vmcs_write64(VMCS_LINK_POINTER, -1ull);
exec_control = vmcs12->pin_based_vm_exec_control;
@@ -10633,7 +10745,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if (nested_cpu_has_posted_intr(vmcs12)) {
vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
vmx->nested.pi_pending = false;
- vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
} else {
exec_control &= ~PIN_BASED_POSTED_INTR;
}
@@ -10644,25 +10755,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if (nested_cpu_has_preemption_timer(vmcs12))
vmx_start_preemption_timer(vcpu);
- /*
- * Whether page-faults are trapped is determined by a combination of
- * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
- * If enable_ept, L0 doesn't care about page faults and we should
- * set all of these to L1's desires. However, if !enable_ept, L0 does
- * care about (at least some) page faults, and because it is not easy
- * (if at all possible?) to merge L0 and L1's desires, we simply ask
- * to exit on each and every L2 page fault. This is done by setting
- * MASK=MATCH=0 and (see below) EB.PF=1.
- * Note that below we don't need special code to set EB.PF beyond the
- * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
- * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
- * !enable_ept, EB.PF is 1, so the "or" will always be 1.
- */
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
- enable_ept ? vmcs12->page_fault_error_code_mask : 0);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
- enable_ept ? vmcs12->page_fault_error_code_match : 0);
-
if (cpu_has_secondary_exec_ctrls()) {
exec_control = vmx->secondary_exec_control;
@@ -10681,22 +10773,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
exec_control |= vmcs12_exec_ctrl;
}
- /* All VMFUNCs are currently emulated through L0 vmexits. */
- if (exec_control & SECONDARY_EXEC_ENABLE_VMFUNC)
- vmcs_write64(VM_FUNCTION_CONTROL, 0);
-
- if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
- vmcs_write64(EOI_EXIT_BITMAP0,
- vmcs12->eoi_exit_bitmap0);
- vmcs_write64(EOI_EXIT_BITMAP1,
- vmcs12->eoi_exit_bitmap1);
- vmcs_write64(EOI_EXIT_BITMAP2,
- vmcs12->eoi_exit_bitmap2);
- vmcs_write64(EOI_EXIT_BITMAP3,
- vmcs12->eoi_exit_bitmap3);
+ if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
vmcs_write16(GUEST_INTR_STATUS,
vmcs12->guest_intr_status);
- }
/*
* Write an illegal value to APIC_ACCESS_ADDR. Later,
@@ -10709,24 +10788,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
}
-
- /*
- * Set host-state according to L0's settings (vmcs12 is irrelevant here)
- * Some constant fields are set here by vmx_set_constant_host_state().
- * Other fields are different per CPU, and will be set later when
- * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
- */
- vmx_set_constant_host_state(vmx);
-
- /*
- * Set the MSR load/store lists to match L0's settings.
- */
- vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
- vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
- vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
-
/*
* HOST_RSP is normally set correctly in vmx_vcpu_run() just before
* entry, but only if the current (host) sp changed from the value
@@ -10758,8 +10819,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
}
/*
- * Merging of IO bitmap not currently supported.
- * Rather, exit every time.
+ * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
+ * for I/O port accesses.
*/
exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
exec_control |= CPU_BASED_UNCOND_IO_EXITING;
@@ -10796,12 +10857,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
}
- set_cr4_guest_host_mask(vmx);
-
- if (from_vmentry &&
- vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)
- vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
-
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vmcs_write64(TSC_OFFSET,
vcpu->arch.tsc_offset + vmcs12->tsc_offset);
@@ -10810,9 +10865,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);
- if (cpu_has_vmx_msr_bitmap())
- vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
-
if (enable_vpid) {
/*
* There is no direct mapping between vpid02 and vpid12, the
@@ -10823,16 +10875,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
* even if spawn a lot of nested vCPUs.
*/
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) {
- vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
vmx->nested.last_vpid = vmcs12->virtual_processor_id;
- __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
+ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02, true);
}
} else {
- vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
- vmx_flush_tlb(vcpu);
+ vmx_flush_tlb(vcpu, true);
}
-
}
if (enable_pml) {
@@ -10881,6 +10930,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
vmx_set_efer(vcpu, vcpu->arch.efer);
+ if (vmx->nested.dirty_vmcs12) {
+ prepare_vmcs02_full(vcpu, vmcs12, from_vmentry);
+ vmx->nested.dirty_vmcs12 = false;
+ }
+
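This check is the payoff of the prepare_vmcs02_full() split: the many
rarely-changed fields are rewritten into vmcs02 only when L1 actually
dirtied vmcs12, while the shadowed fields written earlier in
prepare_vmcs02() are synced on every nested entry. The shape of the
pattern, sketched stand-alone (names hypothetical):

	struct nested_state {
		int dirty;	/* set whenever L1 writes vmcs12 */
	};

	static void sync_hot_fields(void) { /* shadowed fields */ }
	static void sync_cold_fields(void) { /* the _full() part */ }

	static void prepare(struct nested_state *n)
	{
		sync_hot_fields();		/* every entry */
		if (n->dirty) {			/* only after vmcs12 changed */
			sync_cold_fields();
			n->dirty = 0;
		}
	}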
/* Shadow page tables on either EPT or shadow page tables. */
if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
entry_failure_code))
@@ -10889,16 +10943,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
- /*
- * L1 may access the L2's PDPTR, so save them to construct vmcs12
- */
- if (enable_ept) {
- vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
- vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
- vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
- vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
- }
-
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
return 0;
@@ -11254,7 +11298,6 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
if (block_nested_events)
return -EBUSY;
nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
- vcpu->arch.exception.pending = false;
return 0;
}
@@ -11535,11 +11578,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
* L1's vpid. TODO: move to a more elaborate solution, giving
* each L2 its own vpid and exposing the vpid feature to L1.
*/
- vmx_flush_tlb(vcpu);
+ vmx_flush_tlb(vcpu, true);
}
- /* Restore posted intr vector. */
- if (nested_cpu_has_posted_intr(vmcs12))
- vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
@@ -11800,6 +11840,21 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+
+ /*
+ * RDPID causes #UD if disabled through secondary execution controls.
+ * Because it is marked as EmulateOnUD, we need to intercept it here.
+ */
+ if (info->intercept == x86_intercept_rdtscp &&
+ !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
+ ctxt->exception.vector = UD_VECTOR;
+ ctxt->exception.error_code_valid = false;
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
+ /* TODO: check more intercepts... */
return X86EMUL_CONTINUE;
}
@@ -12313,6 +12368,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.handle_external_intr = vmx_handle_external_intr,
.mpx_supported = vmx_mpx_supported,
.xsaves_supported = vmx_xsaves_supported,
+ .umip_emulated = vmx_umip_emulated,
.check_nested_events = vmx_check_nested_events,
diff --git a/arch/x86/kvm/vmx_shadow_fields.h b/arch/x86/kvm/vmx_shadow_fields.h
new file mode 100644
index 000000000000..cd0c75f6d037
--- /dev/null
+++ b/arch/x86/kvm/vmx_shadow_fields.h
@@ -0,0 +1,77 @@
+#ifndef SHADOW_FIELD_RO
+#define SHADOW_FIELD_RO(x)
+#endif
+#ifndef SHADOW_FIELD_RW
+#define SHADOW_FIELD_RW(x)
+#endif
+
+/*
+ * We do NOT shadow fields that are modified when L0
+ * traps and emulates any vmx instruction (e.g. VMPTRLD,
+ * VMXON...) executed by L1.
+ * For example, VM_INSTRUCTION_ERROR is read
+ * by L1 if a vmx instruction fails (part of the error path).
+ * Note the code assumes this logic. If for some reason
+ * we start shadowing these fields then we need to
+ * force a shadow sync when L0 emulates vmx instructions
+ * (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
+ * by nested_vmx_failValid).
+ *
+ * When adding or removing fields here, note that shadowed
+ * fields must always be synced by prepare_vmcs02, not just
+ * prepare_vmcs02_full.
+ */
+
+/*
+ * Keeping the fields ordered by size is an attempt at improving
+ * branch prediction in vmcs_read_any and vmcs_write_any.
+ */
+
+/* 16-bits */
+SHADOW_FIELD_RW(GUEST_CS_SELECTOR)
+SHADOW_FIELD_RW(GUEST_INTR_STATUS)
+SHADOW_FIELD_RW(GUEST_PML_INDEX)
+SHADOW_FIELD_RW(HOST_FS_SELECTOR)
+SHADOW_FIELD_RW(HOST_GS_SELECTOR)
+
+/* 32-bits */
+SHADOW_FIELD_RO(VM_EXIT_REASON)
+SHADOW_FIELD_RO(VM_EXIT_INTR_INFO)
+SHADOW_FIELD_RO(VM_EXIT_INSTRUCTION_LEN)
+SHADOW_FIELD_RO(IDT_VECTORING_INFO_FIELD)
+SHADOW_FIELD_RO(IDT_VECTORING_ERROR_CODE)
+SHADOW_FIELD_RO(VM_EXIT_INTR_ERROR_CODE)
+SHADOW_FIELD_RW(CPU_BASED_VM_EXEC_CONTROL)
+SHADOW_FIELD_RW(EXCEPTION_BITMAP)
+SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE)
+SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD)
+SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN)
+SHADOW_FIELD_RW(TPR_THRESHOLD)
+SHADOW_FIELD_RW(GUEST_CS_LIMIT)
+SHADOW_FIELD_RW(GUEST_CS_AR_BYTES)
+SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO)
+SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE)
+
+/* Natural width */
+SHADOW_FIELD_RO(EXIT_QUALIFICATION)
+SHADOW_FIELD_RO(GUEST_LINEAR_ADDRESS)
+SHADOW_FIELD_RW(GUEST_RIP)
+SHADOW_FIELD_RW(GUEST_RSP)
+SHADOW_FIELD_RW(GUEST_CR0)
+SHADOW_FIELD_RW(GUEST_CR3)
+SHADOW_FIELD_RW(GUEST_CR4)
+SHADOW_FIELD_RW(GUEST_RFLAGS)
+SHADOW_FIELD_RW(GUEST_CS_BASE)
+SHADOW_FIELD_RW(GUEST_ES_BASE)
+SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK)
+SHADOW_FIELD_RW(CR0_READ_SHADOW)
+SHADOW_FIELD_RW(CR4_READ_SHADOW)
+SHADOW_FIELD_RW(HOST_FS_BASE)
+SHADOW_FIELD_RW(HOST_GS_BASE)
+
+/* 64-bit */
+SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS)
+SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS_HIGH)
+
+#undef SHADOW_FIELD_RO
+#undef SHADOW_FIELD_RW
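The new header is an X-macro list: a consumer defines SHADOW_FIELD_RO
and/or SHADOW_FIELD_RW to the expansion it wants and then includes the
file; any macro left undefined expands to nothing. A sketch of a typical
consumer, assuming the VMCS field encodings are in scope:

	/* Table of the read/write shadowed field encodings. */
	static const unsigned long shadow_read_write_fields[] = {
	#define SHADOW_FIELD_RW(x) x,
	#include "vmx_shadow_fields.h"
	};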
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f9c5171dad2b..c8a0b545ac20 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -67,6 +67,8 @@
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
+#include <asm/mshyperv.h>
+#include <asm/hypervisor.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -177,7 +179,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "request_irq", VCPU_STAT(request_irq_exits) },
{ "irq_exits", VCPU_STAT(irq_exits) },
{ "host_state_reload", VCPU_STAT(host_state_reload) },
- { "efer_reload", VCPU_STAT(efer_reload) },
{ "fpu_reload", VCPU_STAT(fpu_reload) },
{ "insn_emulation", VCPU_STAT(insn_emulation) },
{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
@@ -702,7 +703,8 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
!vcpu->guest_xcr0_loaded) {
/* kvm_set_xcr() also depends on this */
- xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+ if (vcpu->arch.xcr0 != host_xcr0)
+ xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
vcpu->guest_xcr0_loaded = 1;
}
}
@@ -794,6 +796,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
return 1;
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
+ return 1;
+
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE))
return 1;
@@ -1037,6 +1042,7 @@ static u32 emulated_msrs[] = {
MSR_IA32_MCG_CTL,
MSR_IA32_MCG_EXT_CTL,
MSR_IA32_SMBASE,
+ MSR_SMI_COUNT,
MSR_PLATFORM_INFO,
MSR_MISC_FEATURES_ENABLES,
};
@@ -1378,6 +1384,11 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
return tsc;
}
+static inline int gtod_is_based_on_tsc(int mode)
+{
+ return mode == VCLOCK_TSC || mode == VCLOCK_HVCLOCK;
+}
+
static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
@@ -1397,7 +1408,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
* perform request to enable masterclock.
*/
if (ka->use_master_clock ||
- (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
+ (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched))
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
@@ -1460,6 +1471,19 @@ static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
vcpu->arch.tsc_offset = offset;
}
+static inline bool kvm_check_tsc_unstable(void)
+{
+#ifdef CONFIG_X86_64
+ /*
+ * The TSC is marked unstable when we're running on Hyper-V, but
+ * the 'TSC page' clocksource is still good.
+ */
+ if (pvclock_gtod_data.clock.vclock_mode == VCLOCK_HVCLOCK)
+ return false;
+#endif
+ return check_tsc_unstable();
+}
+
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
struct kvm *kvm = vcpu->kvm;
@@ -1505,7 +1529,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
*/
if (synchronizing &&
vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
- if (!check_tsc_unstable()) {
+ if (!kvm_check_tsc_unstable()) {
offset = kvm->arch.cur_tsc_offset;
pr_debug("kvm: matched tsc offset for %llu\n", data);
} else {
@@ -1605,18 +1629,43 @@ static u64 read_tsc(void)
return last;
}
-static inline u64 vgettsc(u64 *cycle_now)
+static inline u64 vgettsc(u64 *tsc_timestamp, int *mode)
{
long v;
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+ u64 tsc_pg_val;
+
+ switch (gtod->clock.vclock_mode) {
+ case VCLOCK_HVCLOCK:
+ tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
+ tsc_timestamp);
+ if (tsc_pg_val != U64_MAX) {
+ /* TSC page valid */
+ *mode = VCLOCK_HVCLOCK;
+ v = (tsc_pg_val - gtod->clock.cycle_last) &
+ gtod->clock.mask;
+ } else {
+ /* TSC page invalid */
+ *mode = VCLOCK_NONE;
+ }
+ break;
+ case VCLOCK_TSC:
+ *mode = VCLOCK_TSC;
+ *tsc_timestamp = read_tsc();
+ v = (*tsc_timestamp - gtod->clock.cycle_last) &
+ gtod->clock.mask;
+ break;
+ default:
+ *mode = VCLOCK_NONE;
+ }
- *cycle_now = read_tsc();
+ if (*mode == VCLOCK_NONE)
+ *tsc_timestamp = v = 0;
- v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
return v * gtod->clock.mult;
}
-static int do_monotonic_boot(s64 *t, u64 *cycle_now)
+static int do_monotonic_boot(s64 *t, u64 *tsc_timestamp)
{
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
unsigned long seq;
@@ -1625,9 +1674,8 @@ static int do_monotonic_boot(s64 *t, u64 *cycle_now)
do {
seq = read_seqcount_begin(&gtod->seq);
- mode = gtod->clock.vclock_mode;
ns = gtod->nsec_base;
- ns += vgettsc(cycle_now);
+ ns += vgettsc(tsc_timestamp, &mode);
ns >>= gtod->clock.shift;
ns += gtod->boot_ns;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -1636,7 +1684,7 @@ static int do_monotonic_boot(s64 *t, u64 *cycle_now)
return mode;
}
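Stripped of the seqcount retry loop, the conversion do_monotonic_boot()
performs is ordinary fixed-point clocksource math. A stand-alone sketch
(struct and names hypothetical):

	#include <stdint.h>

	struct gtod_copy {
		uint64_t cycle_last, mask, nsec_base, boot_ns;
		uint32_t mult, shift;
	};

	/* ns = ((nsec_base + ((tsc - last) & mask) * mult) >> shift) + boot_ns */
	static uint64_t tsc_to_boot_ns(uint64_t tsc, const struct gtod_copy *g)
	{
		uint64_t ns = g->nsec_base;

		ns += ((tsc - g->cycle_last) & g->mask) * g->mult;
		ns >>= g->shift;
		return ns + g->boot_ns;
	}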
-static int do_realtime(struct timespec *ts, u64 *cycle_now)
+static int do_realtime(struct timespec *ts, u64 *tsc_timestamp)
{
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
unsigned long seq;
@@ -1645,10 +1693,9 @@ static int do_realtime(struct timespec *ts, u64 *cycle_now)
do {
seq = read_seqcount_begin(&gtod->seq);
- mode = gtod->clock.vclock_mode;
ts->tv_sec = gtod->wall_time_sec;
ns = gtod->nsec_base;
- ns += vgettsc(cycle_now);
+ ns += vgettsc(tsc_timestamp, &mode);
ns >>= gtod->clock.shift;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -1658,25 +1705,26 @@ static int do_realtime(struct timespec *ts, u64 *cycle_now)
return mode;
}
-/* returns true if host is using tsc clocksource */
-static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
+/* returns true if host is using a TSC-based clocksource */
+static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
{
/* checked again under seqlock below */
- if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
+ if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
return false;
- return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
+ return gtod_is_based_on_tsc(do_monotonic_boot(kernel_ns,
+ tsc_timestamp));
}
-/* returns true if host is using tsc clocksource */
+/* returns true if host is using a TSC-based clocksource */
static bool kvm_get_walltime_and_clockread(struct timespec *ts,
- u64 *cycle_now)
+ u64 *tsc_timestamp)
{
/* checked again under seqlock below */
- if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
+ if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
return false;
- return do_realtime(ts, cycle_now) == VCLOCK_TSC;
+ return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
}
#endif
@@ -2119,6 +2167,12 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
vcpu->arch.pv_time_enabled = false;
}
+static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+{
+ ++vcpu->stat.tlb_flush;
+ kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
+}
+
static void record_steal_time(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
@@ -2128,7 +2182,12 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
return;
- vcpu->arch.st.steal.preempted = 0;
+ /*
+ * Doing a TLB flush here, on the guest's behalf, can avoid
+ * expensive IPIs.
+ */
+ if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
+ kvm_vcpu_flush_tlb(vcpu, false);
if (vcpu->arch.st.steal.version & 1)
vcpu->arch.st.steal.version += 1; /* first time write, random junk */
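The xchg() pairs with the guest half of the paravirt protocol: once the
host has marked a vCPU KVM_VCPU_PREEMPTED (see
kvm_steal_time_set_preempted() below), the guest may skip the shootdown
IPI to that vCPU and set KVM_VCPU_FLUSH_TLB in its steal-time record
instead; the flush then happens here, just before the vCPU runs again. A
simplified sketch of the guest side (the real code uses cmpxchg to close
the race with the host clearing the preempted flag):

	for_each_cpu(cpu, flushmask) {
		struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

		if (READ_ONCE(st->preempted) & KVM_VCPU_PREEMPTED) {
			st->preempted |= KVM_VCPU_FLUSH_TLB;
			__cpumask_clear_cpu(cpu, flushmask); /* no IPI */
		}
	}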
@@ -2229,6 +2288,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
vcpu->arch.smbase = data;
break;
+ case MSR_SMI_COUNT:
+ if (!msr_info->host_initiated)
+ return 1;
+ vcpu->arch.smi_count = data;
+ break;
case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK:
vcpu->kvm->arch.wall_clock = data;
@@ -2503,6 +2567,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
msr_info->data = vcpu->arch.smbase;
break;
+ case MSR_SMI_COUNT:
+ msr_info->data = vcpu->arch.smi_count;
+ break;
case MSR_IA32_PERF_STATUS:
/* TSC increment by tick */
msr_info->data = 1000ULL;
@@ -2870,13 +2937,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
}
- if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
+ if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
rdtsc() - vcpu->arch.last_host_tsc;
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
- if (check_tsc_unstable()) {
+ if (kvm_check_tsc_unstable()) {
u64 offset = kvm_compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
kvm_vcpu_write_tsc_offset(vcpu, offset);
@@ -2905,7 +2972,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
- vcpu->arch.st.steal.preempted = 1;
+ vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal.preempted,
@@ -2939,12 +3006,18 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
pagefault_enable();
kvm_x86_ops->vcpu_put(vcpu);
vcpu->arch.last_host_tsc = rdtsc();
+ /*
+ * If userspace has set any breakpoints or watchpoints, dr6 is restored
+ * on every vmexit, but if not, we might have a stale dr6 from the
+ * guest. do_debug expects dr6 to be cleared after it runs; do the same here.
+ */
+ set_debugreg(0, 6);
}
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
- if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
+ if (vcpu->arch.apicv_active)
kvm_x86_ops->sync_pir_to_irr(vcpu);
return kvm_apic_get_state(vcpu, s);
@@ -3473,6 +3546,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
void *buffer;
} u;
+ vcpu_load(vcpu);
+
u.buffer = NULL;
switch (ioctl) {
case KVM_GET_LAPIC: {
@@ -3498,8 +3573,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (!lapic_in_kernel(vcpu))
goto out;
u.lapic = memdup_user(argp, sizeof(*u.lapic));
- if (IS_ERR(u.lapic))
- return PTR_ERR(u.lapic);
+ if (IS_ERR(u.lapic)) {
+ r = PTR_ERR(u.lapic);
+ goto out_nofree;
+ }
r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
break;
@@ -3673,8 +3750,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
case KVM_SET_XSAVE: {
u.xsave = memdup_user(argp, sizeof(*u.xsave));
- if (IS_ERR(u.xsave))
- return PTR_ERR(u.xsave);
+ if (IS_ERR(u.xsave)) {
+ r = PTR_ERR(u.xsave);
+ goto out_nofree;
+ }
r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
break;
@@ -3696,8 +3775,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
case KVM_SET_XCRS: {
u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
- if (IS_ERR(u.xcrs))
- return PTR_ERR(u.xcrs);
+ if (IS_ERR(u.xcrs)) {
+ r = PTR_ERR(u.xcrs);
+ goto out_nofree;
+ }
r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
break;
@@ -3741,6 +3822,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
out:
kfree(u.buffer);
+out_nofree:
+ vcpu_put(vcpu);
return r;
}
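With vcpu_load()/vcpu_put() hoisted into the ioctl itself, every early
return must now funnel through vcpu_put(); the new out_nofree label
exists for failures of memdup_user(), where there is nothing to kfree()
yet. The resulting control-flow shape (signature simplified; validate and
do_ioctl are hypothetical stand-ins):

	long vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp, size_t size)
	{
		long r;
		void *buf;

		vcpu_load(vcpu);
		buf = memdup_user(argp, size);
		if (IS_ERR(buf)) {
			r = PTR_ERR(buf);
			goto out_nofree;	/* nothing allocated yet */
		}
		if (!validate(vcpu)) {
			r = -EINVAL;
			goto out;		/* buf must be freed */
		}
		r = do_ioctl(vcpu, buf);
	out:
		kfree(buf);
	out_nofree:
		vcpu_put(vcpu);			/* always pairs with vcpu_load */
		return r;
	}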
@@ -4297,6 +4380,36 @@ set_identity_unlock:
r = kvm_vm_ioctl_enable_cap(kvm, &cap);
break;
}
+ case KVM_MEMORY_ENCRYPT_OP: {
+ r = -ENOTTY;
+ if (kvm_x86_ops->mem_enc_op)
+ r = kvm_x86_ops->mem_enc_op(kvm, argp);
+ break;
+ }
+ case KVM_MEMORY_ENCRYPT_REG_REGION: {
+ struct kvm_enc_region region;
+
+ r = -EFAULT;
+ if (copy_from_user(&region, argp, sizeof(region)))
+ goto out;
+
+ r = -ENOTTY;
+ if (kvm_x86_ops->mem_enc_reg_region)
+ r = kvm_x86_ops->mem_enc_reg_region(kvm, &region);
+ break;
+ }
+ case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
+ struct kvm_enc_region region;
+
+ r = -EFAULT;
+ if (copy_from_user(&region, argp, sizeof(region)))
+ goto out;
+
+ r = -ENOTTY;
+ if (kvm_x86_ops->mem_enc_unreg_region)
+ r = kvm_x86_ops->mem_enc_unreg_region(kvm, &region);
+ break;
+ }
default:
r = -ENOTTY;
}
@@ -5705,7 +5818,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
* handle watchpoints yet, those would be handled in
* the emulate_ops.
*/
- if (kvm_vcpu_check_breakpoint(vcpu, &r))
+ if (!(emulation_type & EMULTYPE_SKIP) &&
+ kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
ctxt->interruptibility = 0;
@@ -5891,6 +6005,43 @@ static void tsc_khz_changed(void *data)
__this_cpu_write(cpu_tsc_khz, khz);
}
+#ifdef CONFIG_X86_64
+static void kvm_hyperv_tsc_notifier(void)
+{
+ struct kvm *kvm;
+ struct kvm_vcpu *vcpu;
+ int cpu;
+
+ spin_lock(&kvm_lock);
+ list_for_each_entry(kvm, &vm_list, vm_list)
+ kvm_make_mclock_inprogress_request(kvm);
+
+ hyperv_stop_tsc_emulation();
+
+ /* TSC frequency always matches when on Hyper-V */
+ for_each_present_cpu(cpu)
+ per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
+ kvm_max_guest_tsc_khz = tsc_khz;
+
+ list_for_each_entry(kvm, &vm_list, vm_list) {
+ struct kvm_arch *ka = &kvm->arch;
+
+ spin_lock(&ka->pvclock_gtod_sync_lock);
+
+ pvclock_update_vm_gtod_copy(kvm);
+
+ kvm_for_each_vcpu(cpu, vcpu, kvm)
+ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+
+ kvm_for_each_vcpu(cpu, vcpu, kvm)
+ kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
+
+ spin_unlock(&ka->pvclock_gtod_sync_lock);
+ }
+ spin_unlock(&kvm_lock);
+}
+#endif
+
static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
@@ -6112,9 +6263,9 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
update_pvclock_gtod(tk);
/* disable master clock if host does not trust, or does not
- * use, TSC clocksource
+ * use, a TSC-based clocksource.
*/
- if (gtod->clock.vclock_mode != VCLOCK_TSC &&
+ if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
atomic_read(&kvm_guest_has_master_clock) != 0)
queue_work(system_long_wq, &pvclock_gtod_work);
@@ -6176,6 +6327,9 @@ int kvm_arch_init(void *opaque)
kvm_lapic_init();
#ifdef CONFIG_X86_64
pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
+
+ if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
+ set_hv_tscchange_cb(kvm_hyperv_tsc_notifier);
#endif
return 0;
@@ -6188,6 +6342,10 @@ out:
void kvm_arch_exit(void)
{
+#ifdef CONFIG_X86_64
+ if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
+ clear_hv_tscchange_cb();
+#endif
kvm_lapic_exit();
perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
@@ -6450,6 +6608,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
kvm_x86_ops->queue_exception(vcpu);
} else if (vcpu->arch.smi_pending && !is_smm(vcpu) && kvm_x86_ops->smi_allowed(vcpu)) {
vcpu->arch.smi_pending = false;
+ ++vcpu->arch.smi_count;
enter_smm(vcpu);
} else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
--vcpu->arch.nmi_pending;
@@ -6751,7 +6910,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
if (irqchip_split(vcpu->kvm))
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
else {
- if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
+ if (vcpu->arch.apicv_active)
kvm_x86_ops->sync_pir_to_irr(vcpu);
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
}
@@ -6760,12 +6919,6 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}
-static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
-{
- ++vcpu->stat.tlb_flush;
- kvm_x86_ops->tlb_flush(vcpu);
-}
-
void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
unsigned long start, unsigned long end)
{
@@ -6834,7 +6987,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
kvm_mmu_sync_roots(vcpu);
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
- kvm_vcpu_flush_tlb(vcpu);
+ kvm_vcpu_flush_tlb(vcpu, true);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
r = 0;
@@ -6983,10 +7136,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* This handles the case where a posted interrupt was
* notified with kvm_vcpu_kick.
*/
- if (kvm_lapic_enabled(vcpu)) {
- if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
- kvm_x86_ops->sync_pir_to_irr(vcpu);
- }
+ if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
+ kvm_x86_ops->sync_pir_to_irr(vcpu);
if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
|| need_resched() || signal_pending(current)) {
@@ -7007,7 +7158,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
trace_kvm_entry(vcpu->vcpu_id);
- wait_lapic_expire(vcpu);
+ if (lapic_timer_advance_ns)
+ wait_lapic_expire(vcpu);
guest_enter_irqoff();
if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -7268,8 +7420,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int r;
+ vcpu_load(vcpu);
kvm_sigset_activate(vcpu);
-
kvm_load_guest_fpu(vcpu);
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
@@ -7316,11 +7468,14 @@ out:
post_kvm_run_save(vcpu);
kvm_sigset_deactivate(vcpu);
+ vcpu_put(vcpu);
return r;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
+ vcpu_load(vcpu);
+
if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
/*
* We are here if userspace calls get_regs() in the middle of
@@ -7354,11 +7509,14 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->rip = kvm_rip_read(vcpu);
regs->rflags = kvm_get_rflags(vcpu);
+ vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
+ vcpu_load(vcpu);
+
vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
@@ -7388,6 +7546,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
kvm_make_request(KVM_REQ_EVENT, vcpu);
+ vcpu_put(vcpu);
return 0;
}
@@ -7406,6 +7565,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
{
struct desc_ptr dt;
+ vcpu_load(vcpu);
+
kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
@@ -7437,12 +7598,15 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
set_bit(vcpu->arch.interrupt.nr,
(unsigned long *)sregs->interrupt_bitmap);
+ vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
+ vcpu_load(vcpu);
+
kvm_apic_accept_events(vcpu);
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
vcpu->arch.pv.pv_unhalted)
@@ -7450,21 +7614,26 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
else
mp_state->mp_state = vcpu->arch.mp_state;
+ vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
+ int ret = -EINVAL;
+
+ vcpu_load(vcpu);
+
if (!lapic_in_kernel(vcpu) &&
mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
- return -EINVAL;
+ goto out;
/* INITs are latched while in SMM */
if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
(mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
- return -EINVAL;
+ goto out;
if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
@@ -7472,7 +7641,11 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
} else
vcpu->arch.mp_state = mp_state->mp_state;
kvm_make_request(KVM_REQ_EVENT, vcpu);
- return 0;
+
+ ret = 0;
+out:
+ vcpu_put(vcpu);
+ return ret;
}
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
@@ -7526,18 +7699,21 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
int mmu_reset_needed = 0;
int pending_vec, max_bits, idx;
struct desc_ptr dt;
+ int ret = -EINVAL;
+
+ vcpu_load(vcpu);
if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
(sregs->cr4 & X86_CR4_OSXSAVE))
- return -EINVAL;
+ goto out;
if (kvm_valid_sregs(vcpu, sregs))
- return -EINVAL;
+ goto out;
apic_base_msr.data = sregs->apic_base;
apic_base_msr.host_initiated = true;
if (kvm_set_apic_base(vcpu, &apic_base_msr))
- return -EINVAL;
+ goto out;
dt.size = sregs->idt.limit;
dt.address = sregs->idt.base;
@@ -7603,7 +7779,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
kvm_make_request(KVM_REQ_EVENT, vcpu);
- return 0;
+ ret = 0;
+out:
+ vcpu_put(vcpu);
+ return ret;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -7612,6 +7791,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
unsigned long rflags;
int i, r;
+ vcpu_load(vcpu);
+
if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
r = -EBUSY;
if (vcpu->arch.exception.pending)
@@ -7657,7 +7838,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
r = 0;
out:
-
+ vcpu_put(vcpu);
return r;
}
@@ -7671,6 +7852,8 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
gpa_t gpa;
int idx;
+ vcpu_load(vcpu);
+
idx = srcu_read_lock(&vcpu->kvm->srcu);
gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -7679,14 +7862,17 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
tr->writeable = 1;
tr->usermode = 0;
+ vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- struct fxregs_state *fxsave =
- &vcpu->arch.guest_fpu.state.fxsave;
+ struct fxregs_state *fxsave;
+
+ vcpu_load(vcpu);
+ fxsave = &vcpu->arch.guest_fpu.state.fxsave;
memcpy(fpu->fpr, fxsave->st_space, 128);
fpu->fcw = fxsave->cwd;
fpu->fsw = fxsave->swd;
@@ -7696,13 +7882,17 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
fpu->last_dp = fxsave->rdp;
memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
+ vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- struct fxregs_state *fxsave =
- &vcpu->arch.guest_fpu.state.fxsave;
+ struct fxregs_state *fxsave;
+
+ vcpu_load(vcpu);
+
+ fxsave = &vcpu->arch.guest_fpu.state.fxsave;
memcpy(fxsave->st_space, fpu->fpr, 128);
fxsave->cwd = fpu->fcw;
@@ -7713,6 +7903,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
fxsave->rdp = fpu->last_dp;
memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
+ vcpu_put(vcpu);
return 0;
}
@@ -7769,7 +7960,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
{
struct kvm_vcpu *vcpu;
- if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
+ if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
printk_once(KERN_WARNING
"kvm: SMP vm created on host with unstable TSC; "
"guest TSC will not be reliable\n");
@@ -7781,16 +7972,12 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
- int r;
-
kvm_vcpu_mtrr_init(vcpu);
- r = vcpu_load(vcpu);
- if (r)
- return r;
+ vcpu_load(vcpu);
kvm_vcpu_reset(vcpu, false);
kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
- return r;
+ return 0;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -7800,13 +7987,15 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
kvm_hv_vcpu_postcreate(vcpu);
- if (vcpu_load(vcpu))
+ if (mutex_lock_killable(&vcpu->mutex))
return;
+ vcpu_load(vcpu);
msr.data = 0x0;
msr.index = MSR_IA32_TSC;
msr.host_initiated = true;
kvm_write_tsc(vcpu, &msr);
vcpu_put(vcpu);
+ mutex_unlock(&vcpu->mutex);
if (!kvmclock_periodic_sync)
return;
@@ -7817,11 +8006,9 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
- int r;
vcpu->arch.apf.msr_val = 0;
- r = vcpu_load(vcpu);
- BUG_ON(r);
+ vcpu_load(vcpu);
kvm_mmu_unload(vcpu);
vcpu_put(vcpu);
@@ -7833,6 +8020,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.hflags = 0;
vcpu->arch.smi_pending = 0;
+ vcpu->arch.smi_count = 0;
atomic_set(&vcpu->arch.nmi_queued, 0);
vcpu->arch.nmi_pending = 0;
vcpu->arch.nmi_injected = false;
@@ -7926,7 +8114,7 @@ int kvm_arch_hardware_enable(void)
return ret;
local_tsc = rdtsc();
- stable = !check_tsc_unstable();
+ stable = !kvm_check_tsc_unstable();
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (!stable && vcpu->cpu == smp_processor_id())
@@ -8192,9 +8380,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
- int r;
- r = vcpu_load(vcpu);
- BUG_ON(r);
+ vcpu_load(vcpu);
kvm_mmu_unload(vcpu);
vcpu_put(vcpu);
}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index d0b95b7a90b4..b91215d1fd80 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -12,6 +12,7 @@
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.exception.pending = false;
vcpu->arch.exception.injected = false;
}
@@ -265,36 +266,8 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
static inline bool kvm_mwait_in_guest(void)
{
- unsigned int eax, ebx, ecx, edx;
-
- if (!cpu_has(&boot_cpu_data, X86_FEATURE_MWAIT))
- return false;
-
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_AMD:
- /* All AMD CPUs have a working MWAIT implementation */
- return true;
- case X86_VENDOR_INTEL:
- /* Handle Intel below */
- break;
- default:
- return false;
- }
-
- /*
- * Intel CPUs without CPUID5_ECX_INTERRUPT_BREAK are problematic as
- * they would allow guest to stop the CPU completely by disabling
- * interrupts then invoking MWAIT.
- */
- if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
- return false;
-
- cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
-
- if (!(ecx & CPUID5_ECX_INTERRUPT_BREAK))
- return false;
-
- return true;
+ return boot_cpu_has(X86_FEATURE_MWAIT) &&
+ !boot_cpu_has_bug(X86_BUG_MONITOR);
}
#endif
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index fe7d57a8fb60..1555bd7d3449 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -678,6 +678,25 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
}
/**
+ * pat_pfn_immune_to_uc_mtrr - Check whether the PAT memory type
+ * of @pfn cannot be overridden by UC MTRR memory type.
+ *
+ * Only to be called when PAT is enabled.
+ *
+ * Returns true if the PAT memory type of @pfn is UC, UC-, or WC.
+ * Returns false in other cases.
+ */
+bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn)
+{
+ enum page_cache_mode cm = lookup_memtype(PFN_PHYS(pfn));
+
+ return cm == _PAGE_CACHE_MODE_UC ||
+ cm == _PAGE_CACHE_MODE_UC_MINUS ||
+ cm == _PAGE_CACHE_MODE_WC;
+}
+EXPORT_SYMBOL_GPL(pat_pfn_immune_to_uc_mtrr);
+
+/**
* io_reserve_memtype - Request a memory type mapping for a region of memory
* @start: start (physical address) of the region
* @end: end (physical address) of the region
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 13b4f19b9131..159a897151d6 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -694,6 +694,9 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
int i, ret = 0;
pte_t *pte;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return 0;
+
if (kmap_ops) {
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
kmap_ops, count);
@@ -736,6 +739,9 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
{
int i, ret = 0;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return 0;
+
for (i = 0; i < count; i++) {
unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
unsigned long pfn = page_to_pfn(pages[i]);
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 497cc55a0c16..96f26e026783 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -9,7 +9,9 @@
#include <asm/boot.h>
#include <asm/asm.h>
+#include <asm/msr.h>
#include <asm/page_types.h>
+#include <asm/percpu.h>
#include <asm/unwind_hints.h>
#include <xen/interface/elfnote.h>
@@ -35,6 +37,20 @@ ENTRY(startup_xen)
mov %_ASM_SI, xen_start_info
mov $init_thread_union+THREAD_SIZE, %_ASM_SP
+#ifdef CONFIG_X86_64
+ /* Set up %gs.
+ *
+ * The base of %gs always points to the bottom of the irqstack
+ * union. If the stack protector canary is enabled, it is
+ * located at %gs:40. Note that, on SMP, the boot cpu uses
+ * the init data section until the per-cpu areas are set up.
+ */
+ movl $MSR_GS_BASE,%ecx
+ movq $INIT_PER_CPU_VAR(irq_stack_union),%rax
+ cdq
+ wrmsr
+#endif
+
jmp xen_start_kernel
END(startup_xen)
__FINIT
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index d650c5b6ec90..f505e9a01b2d 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -79,7 +79,12 @@ config ACPI_DEBUGGER_USER
endif
config ACPI_SPCR_TABLE
- bool
+ bool "ACPI Serial Port Console Redirection Support"
+ default y if X86
+ help
+ Enable support for Serial Port Console Redirection (SPCR) Table.
+ This table provides information about the configuration of the
+ earlycon console.
config ACPI_LPIT
bool
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index f53ccc680238..76fb96966f7b 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -53,7 +53,7 @@ MODULE_AUTHOR("Bruno Ducrot");
MODULE_DESCRIPTION("ACPI Video Driver");
MODULE_LICENSE("GPL");
-static bool brightness_switch_enabled = 1;
+static bool brightness_switch_enabled = true;
module_param(brightness_switch_enabled, bool, 0644);
/*
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index 2243c8164b34..e65478593f9a 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,7 +51,7 @@
/* Common info for tool signons */
#define ACPICA_NAME "Intel ACPI Component Architecture"
-#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2017 Intel Corporation"
+#define ACPICA_COPYRIGHT "Copyright (c) 2000 - 2018 Intel Corporation"
#if ACPI_MACHINE_WIDTH == 64
#define ACPI_WIDTH " (64-bit version)"
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 49bf47ca5477..c349ffdf5557 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acconvert.h b/drivers/acpi/acpica/acconvert.h
index c84223b60b35..ce6e8db83e27 100644
--- a/drivers/acpi/acpica/acconvert.h
+++ b/drivers/acpi/acpica/acconvert.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 54b8d9df9423..8b2cca5a717b 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index f8f3a6e74128..fab590bc5fd3 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index a2adfd42f85c..1b0269f6ac2d 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 45ef3f5dc9ad..27f322b2fed1 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index cd722d8edacb..3569aa3bf5ee 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 29555c8789a3..744374ab9285 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index a56675f0661e..3ba3ff0f1c04 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 128a3d71b598..6463340c4522 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 2fb1bb78d85c..6c8f364fe2fc 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 5226146190bf..a1f4d3f385c8 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index cbd59a302679..36c2c5825986 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index c23c47328060..e25634951d03 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index cdfcad8eb74c..7c27bcee6ac7 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 438f3098a093..20f36949928a 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 62134bdbeda6..0338ac32f9c6 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 84a3ceb6e384..15b23414245a 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index b6b29d717824..00d21d2f766e 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index f54dc5a34bdc..b0e9492a6297 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 1236e9a414e4..b680c229ddd5 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbcmds.c b/drivers/acpi/acpica/dbcmds.c
index 5984b90eb590..4112c85f2aab 100644
--- a/drivers/acpi/acpica/dbcmds.c
+++ b/drivers/acpi/acpica/dbcmds.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbconvert.c b/drivers/acpi/acpica/dbconvert.c
index 32d546f0db2f..27236a6c51ff 100644
--- a/drivers/acpi/acpica/dbconvert.c
+++ b/drivers/acpi/acpica/dbconvert.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c
index 5a606eac0c22..7df920cda77d 100644
--- a/drivers/acpi/acpica/dbdisply.c
+++ b/drivers/acpi/acpica/dbdisply.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbexec.c b/drivers/acpi/acpica/dbexec.c
index ed088fceb18d..8ad9e6d9e54b 100644
--- a/drivers/acpi/acpica/dbexec.c
+++ b/drivers/acpi/acpica/dbexec.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbfileio.c b/drivers/acpi/acpica/dbfileio.c
index cf9607945704..084bb332f8e2 100644
--- a/drivers/acpi/acpica/dbfileio.c
+++ b/drivers/acpi/acpica/dbfileio.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbhistry.c b/drivers/acpi/acpica/dbhistry.c
index 7d08974c64c2..55c0f2742339 100644
--- a/drivers/acpi/acpica/dbhistry.c
+++ b/drivers/acpi/acpica/dbhistry.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c
index 954ca3b981a7..f7c661e06f37 100644
--- a/drivers/acpi/acpica/dbinput.c
+++ b/drivers/acpi/acpica/dbinput.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbmethod.c b/drivers/acpi/acpica/dbmethod.c
index df62c9245efc..2cda0bff6f2c 100644
--- a/drivers/acpi/acpica/dbmethod.c
+++ b/drivers/acpi/acpica/dbmethod.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbnames.c b/drivers/acpi/acpica/dbnames.c
index 8c207c772517..8796fc1e0360 100644
--- a/drivers/acpi/acpica/dbnames.c
+++ b/drivers/acpi/acpica/dbnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbobject.c b/drivers/acpi/acpica/dbobject.c
index e7b415c20aa8..d2063cbab39a 100644
--- a/drivers/acpi/acpica/dbobject.c
+++ b/drivers/acpi/acpica/dbobject.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbstats.c b/drivers/acpi/acpica/dbstats.c
index 99fb0160b8fb..d6aaef54e369 100644
--- a/drivers/acpi/acpica/dbstats.c
+++ b/drivers/acpi/acpica/dbstats.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbtest.c b/drivers/acpi/acpica/dbtest.c
index c6bee6143266..56e446b89d18 100644
--- a/drivers/acpi/acpica/dbtest.c
+++ b/drivers/acpi/acpica/dbtest.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbutils.c b/drivers/acpi/acpica/dbutils.c
index bfa972b64171..cd40854ee9be 100644
--- a/drivers/acpi/acpica/dbutils.c
+++ b/drivers/acpi/acpica/dbutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c
index b6985323e7eb..77bbfa97cf91 100644
--- a/drivers/acpi/acpica/dbxface.c
+++ b/drivers/acpi/acpica/dbxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 2873455c986d..04a9f60e7ad1 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 4b6ebc2a2851..606697e741a5 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsdebug.c b/drivers/acpi/acpica/dsdebug.c
index d1f457eda980..14ec52eba408 100644
--- a/drivers/acpi/acpica/dsdebug.c
+++ b/drivers/acpi/acpica/dsdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 0cab34a593d5..95ea639a9424 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index b1842dd4edf7..946ff2e130d9 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index d7fc36917c67..b9c460c2d763 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 27a7de95f7b0..157f1645d91a 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index b21fe084ffc8..4fa3400a95ba 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 0336df7ac47d..0181cd317751 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index 5a602b75084e..902bee78036c 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 4c5faf629a83..a4ce0b4a55a6 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 22f45d090733..2c07d220a50f 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 5771e4e4a99a..fa4ef9229e17 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index b3d0aaec8203..3b1313ba60d0 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 3e081983d2ee..8b5c3613c060 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index da111a1f5bfb..ee002d17526e 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index d3b6b314fa50..4b2b0b44a16b 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 0ce33b0f430c..012b80de1501 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 263d8fc4a9e2..410a3907c051 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 3a3cb8624f41..7ce756cc28ab 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 8649c6242478..8ad4816c9950 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index c8adb400330a..729a8960a3af 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index 2db61ef1b4a3..20fb51c06b8d 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 4f6bb3f016ab..40d0b1f541a0 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index bb58419f0d61..de196c8e3f30 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 93ec528bcd9a..4187f563fede 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 8ce73b962006..d5594f79f877 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index dd1b9dd64cef..9b3c01bf1438 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index c773ac4892cb..96c2520f9570 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 67c7c4ce276c..cbb1598df9dc 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index beba9d56a0d8..705fcd86151a 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconcat.c b/drivers/acpi/acpica/exconcat.c
index 59b8de2f07d3..ea20e10dd1f2 100644
--- a/drivers/acpi/acpica/exconcat.c
+++ b/drivers/acpi/acpica/exconcat.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 61813bd43f9e..827f47b72663 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 23ebadb06a95..9abcc41a573b 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index b8adb11f1b07..3dece45dd997 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index a8191d2ca5e3..d931a66a16e3 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index b2ff61bdb9a8..4989ce9591ae 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 5fda981f6498..e3b0650e5bb6 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index a656608dca84..3d0f274be88b 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index dbad3ebd7df5..1518fcb22ae1 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index ecd95b3f35f1..24c9741dee48 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index caa5ed1f65ec..c7b249cda5c0 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index f787651348c1..dae01c93e480 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 57980b7d3594..3cafa1d6f31a 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index ce857addc8db..f16c655121ff 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 688032b58a21..8b39fffce6dc 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 8de060664204..1d1040f2e3f8 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 7bcc9d809b7e..387c438aa485 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 91c1de046442..77fa8d9aa5bf 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 5e1854ea85f6..b104bc3ca809 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 1c7c9962b0de..2643d34f194d 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index bdd43cde8f36..8f106bdcad5f 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index 56f59cf5da29..3d458d1996b0 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index 4ba7fcbf23b0..905443a3c28f 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index ad3b610057f3..420d9b145d2e 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index ae9df8672d9e..9a67d507a132 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 34d608358eaf..fb80d3f55d63 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index fad249e774b4..68e958d4c25f 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 12626d021a9b..64855b62a5ae 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 09b6822aa5cc..c1c54af148d0 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 283819930be6..faa2fa45eb1c 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index aa6e00081915..f3e7b7851a3a 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 1fe7387a00e6..c85c373ecbc4 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 5b4282902a83..511e3b8ffc6d 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index d1679035d5f3..65d82e6add0b 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index b3c6e439933c..d320b129b7d7 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index e5c095ca6083..589c774bbf9a 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 33e652a12fca..07f672b5a1d1 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 8ba5b32c9f71..ce57ccf4c1bf 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 67b7370dcae5..ce296ac14cf0 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsconvert.c b/drivers/acpi/acpica/nsconvert.c
index d55dcc82f434..2f9d5d190fa9 100644
--- a/drivers/acpi/acpica/nsconvert.c
+++ b/drivers/acpi/acpica/nsconvert.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 4123b5077a7d..e2ac16818dc3 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 5026594763ea..09ac00dee450 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index d22167cbd0ca..c2d883b8c45e 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 9c6297949712..c17af4a3ab67 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index d2915e186ae1..fdfe9309bd33 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 22c92d1a24d8..c686eda7ca66 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 707b2aa501e1..757e44555ec3 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 2fc33a5203f4..c5b22ea5b369 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 9d14b509529e..4f1f6d6d9ddf 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index a8ea8fb1d299..7805d5ce8127 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 418ef2ac82ab..7b6b6d281f1c 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 06037e044694..29c3973c7815 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index e91dbee9235f..a469447f5c02 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index b43fe5fce64b..0487fdb59b0e 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 6b6e6f498cff..dd7ae1bc8af8 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 9b51f65823b2..1075bd9541f5 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 106966235805..e9603fc9586c 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 47f689ec3fcb..ac1fbf767cac 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 171e2faa7c50..dbc51bc5fdd6 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index bb04dec168ad..7dca287d7690 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index c0b179883ff2..b18f1e048985 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index a402ad772a1e..d31f3eb23225 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index eff22950232b..1dc1fc79297e 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index ac88319dc111..2474ff961294 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 22d7f1d6849b..f49cdcc65700 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index c06d6e2fc7a5..f9fa88c79b32 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index cd59dfe6a47d..fe151f42de3a 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 22a37c82af19..bc5c779e54e8 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index c88a681586bf..d2270ade5cf8 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index a131a28bb09d..213bad89675b 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 659fb718504a..576f7aae162b 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index f72ff0b54a63..fe07001ea865 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 55fd1880efbe..bc4c4755aeb9 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdumpinfo.c b/drivers/acpi/acpica/rsdumpinfo.c
index da150e17795b..c4a2a08e31ac 100644
--- a/drivers/acpi/acpica/rsdumpinfo.c
+++ b/drivers/acpi/acpica/rsdumpinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index b0e50518d766..e819bb0f45af 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index b7a47fbc519b..eafd993592f6 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 092a733c42b8..aabd73298eb8 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 36a6657dd34d..11214780ea8f 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 273eecb3001b..05e375abc6b5 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index cc4b5486c4bc..7b4627181cc6 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
index 14d12d6eb716..87dac2812072 100644
--- a/drivers/acpi/acpica/rsserial.c
+++ b/drivers/acpi/acpica/rsserial.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index b2aeca01204a..49ff7f851d58 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index be65e65e216e..3b481f0b81c5 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index b19a2f0ea331..ec69267f1447 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 5f051d82188d..d1763c5e4e91 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index fea89c8d305c..999a64a48e1a 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 0dfc0ac3c141..00be16da1ee2 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index edfd7b10be19..8cdcdd2c4697 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 0c6768d20395..30d40ff8992b 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 5ecb8d2e6834..dca91b6f8cc2 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index d81f442228b8..e09b4b26300e 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index f9f9a7da2cad..abf3c62e1e80 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
index 26a0633115be..d8540f380ae5 100644
--- a/drivers/acpi/acpica/utaddress.c
+++ b/drivers/acpi/acpica/utaddress.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 5594a359dbf1..12fbaddbfb0d 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utascii.c b/drivers/acpi/acpica/utascii.c
index 909bdb198651..95565e46a695 100644
--- a/drivers/acpi/acpica/utascii.c
+++ b/drivers/acpi/acpica/utascii.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utbuffer.c b/drivers/acpi/acpica/utbuffer.c
index f17eaa009dde..2c5a14c2f46b 100644
--- a/drivers/acpi/acpica/utbuffer.c
+++ b/drivers/acpi/acpica/utbuffer.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcache.c b/drivers/acpi/acpica/utcache.c
index 531493306dee..08e6944404b3 100644
--- a/drivers/acpi/acpica/utcache.c
+++ b/drivers/acpi/acpica/utcache.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index e9382255d6c6..01434af99035 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index cff7154b7fee..2201be1bf4c2 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 55debbad487d..1a3f316a18a8 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index c6eb9fae70f9..7d8d0208f0a3 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 42388dcb5ccc..ce5e891291bf 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 3fce7519c690..b8be0b82a130 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utexcep.c b/drivers/acpi/acpica/utexcep.c
index eb6dcab33d2f..e3dbad8b73e5 100644
--- a/drivers/acpi/acpica/utexcep.c
+++ b/drivers/acpi/acpica/utexcep.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 230a50c82f22..933595b0e594 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index fb406daf47fa..f5886d557a94 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index a6eb580ee21d..db3c3c1d33da 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 45eeb0dcf283..a2005b030347 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index db2d9910866e..0636074a4c23 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 2055a858e5f5..eddf71990433 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 45c78c2adbf0..a331313ad5fa 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 524ba931d5e8..6767bd1626f7 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utnonansi.c b/drivers/acpi/acpica/utnonansi.c
index 33a0970646df..94219610e259 100644
--- a/drivers/acpi/acpica/utnonansi.c
+++ b/drivers/acpi/acpica/utnonansi.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index cb3db9fed50d..375901c0a596 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index f6b8dd24b006..00ea104f6a0a 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utownerid.c b/drivers/acpi/acpica/utownerid.c
index 1b3ee74a87eb..9923dfa708be 100644
--- a/drivers/acpi/acpica/utownerid.c
+++ b/drivers/acpi/acpica/utownerid.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utpredef.c b/drivers/acpi/acpica/utpredef.c
index 350709f23e4c..ae6fef02b692 100644
--- a/drivers/acpi/acpica/utpredef.c
+++ b/drivers/acpi/acpica/utpredef.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index c008589b41bd..ac07700f5b79 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresdecode.c b/drivers/acpi/acpica/utresdecode.c
index e15a2538558b..93fa3450ca88 100644
--- a/drivers/acpi/acpica/utresdecode.c
+++ b/drivers/acpi/acpica/utresdecode.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index f9801d13547f..4d289d9c734c 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index eafabcd2fada..7750c48739d8 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 9eacbcb9e4f4..a9507d1976ff 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstrsuppt.c b/drivers/acpi/acpica/utstrsuppt.c
index 97f48d71f9e6..6fc76f0b60e9 100644
--- a/drivers/acpi/acpica/utstrsuppt.c
+++ b/drivers/acpi/acpica/utstrsuppt.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c
index e2067dcb9389..9f7cef1de34a 100644
--- a/drivers/acpi/acpica/utstrtoul64.c
+++ b/drivers/acpi/acpica/utstrtoul64.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 633b4e2c669f..8cc70ca4e0fb 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utuuid.c b/drivers/acpi/acpica/utuuid.c
index 5028e06718b1..95946fdb55d5 100644
--- a/drivers/acpi/acpica/utuuid.c
+++ b/drivers/acpi/acpica/utuuid.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 6b9ba4029f8e..25ef2ce64603 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 9da4f8ef2e77..a78861ded894 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
index 6d5180601cf2..e727db52a55e 100644
--- a/drivers/acpi/acpica/utxfinit.c
+++ b/drivers/acpi/acpica/utxfinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
index 0b85f113f726..764782fcf1bd 100644
--- a/drivers/acpi/acpica/utxfmutex.c
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 19bc440820e6..7128488a3a72 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1209,6 +1209,22 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
},
},
+ {
+ .callback = battery_full_discharging_quirk,
+ .ident = "ASUS UX360UA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX360UA"),
+ },
+ },
+ {
+ .callback = battery_full_discharging_quirk,
+ .ident = "ASUS UX410UAK",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX410UAK"),
+ },
+ },
{},
};
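The two entries added above extend the battery "reports full while discharging" quirk list. For context, dmi_check_system() walks a table like this and invokes the callback of every entry whose DMI_MATCH() fields all match the firmware's DMI strings. The sketch below shows the shape of that walk; dmi_matches() stands in for the real per-field comparison, and the loop is illustrative rather than the kernel's exact implementation:

/* Illustrative walk over a DMI quirk table: an all-zero entry terminates
 * the list, and a callback returning non-zero stops the scan early. */
static int apply_dmi_quirks(const struct dmi_system_id *table)
{
        const struct dmi_system_id *d;
        int count = 0;

        for (d = table; d->ident || d->callback; d++) {
                if (!dmi_matches(d))    /* every DMI_MATCH() must hit */
                        continue;
                count++;
                if (d->callback && d->callback(d))
                        break;
        }
        return count;
}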
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index f87ed3be779a..676c9788e1c8 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -66,10 +66,37 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
return 0;
}
#endif
+static int set_gbl_term_list(const struct dmi_system_id *id)
+{
+ acpi_gbl_parse_table_as_term_list = 1;
+ return 0;
+}
-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
+static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
+ /*
+ * The touchpad on the Dell XPS 9570/Precision M5530 doesn't work in
+ * I2C mode.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=198515
+ */
+ {
+ .callback = set_gbl_term_list,
+ .ident = "Dell Precision M5530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M5530"),
+ },
+ },
+ {
+ .callback = set_gbl_term_list,
+ .ident = "Dell XPS 15 9570",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"),
+ },
+ },
/*
* Invoke DSDT corruption work-around on all Toshiba Satellite.
+ * DSDT will be copied to memory.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
*/
{
@@ -83,7 +110,7 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
{}
};
#else
-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
+static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
{}
};
#endif
@@ -108,6 +135,7 @@ acpi_status acpi_bus_get_status_handle(acpi_handle handle,
}
return status;
}
+EXPORT_SYMBOL_GPL(acpi_bus_get_status_handle);
int acpi_bus_get_status(struct acpi_device *device)
{
@@ -119,6 +147,12 @@ int acpi_bus_get_status(struct acpi_device *device)
return 0;
}
+ /* Battery devices must have their deps met before calling _STA */
+ if (acpi_device_is_battery(device) && device->dep_unmet) {
+ acpi_set_device_status(device, 0);
+ return 0;
+ }
+
status = acpi_bus_get_status_handle(device->handle, &sta);
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -1019,11 +1053,8 @@ void __init acpi_early_init(void)
acpi_permanent_mmap = true;
- /*
- * If the machine falls into the DMI check table,
- * DSDT will be copied to memory
- */
- dmi_check_system(dsdt_dmi_table);
+ /* Check machine-specific quirks */
+ dmi_check_system(acpi_quirks_dmi_table);
status = acpi_reallocate_root_table();
if (ACPI_FAILURE(status)) {
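Two behavioural points in this hunk: acpi_bus_get_status_handle() is now exported for module use, and battery devices report status 0 (not present) until their _DEP dependencies are met, since evaluating _STA against an EC that is not yet ready can return bogus data. A hedged usage sketch of the newly exported helper; ACPI_STA_DEVICE_PRESENT is the standard bit 0 of the _STA return value:

#include <acpi/acpi_bus.h>

/* Probe whether a device is physically present via its _STA method. */
static bool device_present(acpi_handle handle)
{
        unsigned long long sta;

        if (ACPI_FAILURE(acpi_bus_get_status_handle(handle, &sta)))
                return false;
        return sta & ACPI_STA_DEVICE_PRESENT;   /* bit 0 of _STA */
}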
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 06ea4749ebd9..0afbb2658cbc 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -119,7 +119,7 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
* to PCC commands. Keeping it high enough to cover emulators where
* the processors run painfully slow.
*/
-#define NUM_RETRIES 500
+#define NUM_RETRIES 500ULL
struct cppc_attr {
struct attribute attr;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5f0071c7e2e1..abb559cd28d7 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -292,6 +292,9 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
pr->power.states[ACPI_STATE_C1].valid = 1;
pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
+
+ snprintf(pr->power.states[ACPI_STATE_C1].desc,
+ ACPI_CX_DESC_LEN, "ACPI HLT");
}
/* the C0 state only exists as a filler in our array */
pr->power.states[ACPI_STATE_C0].valid = 1;
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 18b72eec3507..c7cf48ad5cb9 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -159,7 +159,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
int ret;
- if (ignore_ppc) {
+ if (ignore_ppc || !pr->performance) {
/*
* Only when it is notification event, the _OST object
* will be evaluated. Otherwise it is skipped.
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 2fa8304171e0..7a3431018e0a 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -275,8 +275,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
device->driver_data = hc;
acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc);
- printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n",
- hc->ec, hc->offset, hc->query_bit);
+ dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n",
+ hc->offset, hc->query_bit);
return 0;
}
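Besides switching to dev_info(), which prefixes the message with the driver and device name automatically, the hunk drops the raw EC pointer from the log line; printing kernel pointers leaks address-space layout, which is presumably why hc->ec disappears from the format string. The idiom in miniature (kernel-style sketch):

/* printk() needs a manual prefix and tempts callers into logging raw
 * pointers; dev_info() identifies the device automatically. */
static void report_hc(struct acpi_device *device, struct acpi_smb_hc *hc)
{
        dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n",
                 hc->offset, hc->query_bit);
}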
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b0fe5272c76a..8e63d937babb 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1565,6 +1565,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
acpi_init_coherency(device);
+ /* Assume there are unmet deps until acpi_device_dep_initialize() runs */
+ device->dep_unmet = 1;
}
void acpi_device_add_finalize(struct acpi_device *device)
@@ -1588,6 +1590,14 @@ static int acpi_add_single_object(struct acpi_device **child,
}
acpi_init_device_object(device, handle, type, sta);
+ /*
+	 * For ACPI_BUS_TYPE_DEVICE, getting the status is delayed until here
+	 * so that we can call acpi_bus_get_status() and use its quirk handling.
+	 * Note this must be done before the power- and wakeup-device-flags
+	 * lookups below.
+ */
+ if (type == ACPI_BUS_TYPE_DEVICE)
+ acpi_bus_get_status(device);
+
acpi_bus_get_power_flags(device);
acpi_bus_get_wakeup_device_flags(device);
@@ -1660,9 +1670,11 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
return -ENODEV;
*type = ACPI_BUS_TYPE_DEVICE;
- status = acpi_bus_get_status_handle(handle, sta);
- if (ACPI_FAILURE(status))
- *sta = 0;
+ /*
+	 * acpi_add_single_object updates this once we have an acpi_device,
+	 * so that acpi_bus_get_status()'s quirk handling can be used.
+ */
+ *sta = 0;
break;
case ACPI_TYPE_PROCESSOR:
*type = ACPI_BUS_TYPE_PROCESSOR;
@@ -1760,6 +1772,8 @@ static void acpi_device_dep_initialize(struct acpi_device *adev)
acpi_status status;
int i;
+ adev->dep_unmet = 0;
+
if (!acpi_has_method(adev->handle, "_DEP"))
return;
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 324b35bfe781..89e97d21a89c 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -21,7 +21,7 @@
* occasionally getting stuck as 1. To avoid the potential for a hang, check
* TXFE == 0 instead of BUSY == 1. This may not be suitable for all UART
* implementations, so only do so if an affected platform is detected in
- * parse_spcr().
+ * acpi_parse_spcr().
*/
bool qdf2400_e44_present;
EXPORT_SYMBOL(qdf2400_e44_present);
@@ -74,19 +74,21 @@ static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb)
}
/**
- * parse_spcr() - parse ACPI SPCR table and add preferred console
+ * acpi_parse_spcr() - parse ACPI SPCR table and add preferred console
*
- * @earlycon: set up earlycon for the console specified by the table
+ * @enable_earlycon: set up earlycon for the console specified by the table
+ * @enable_console: set up the console specified by the table.
*
* For the architectures with support for ACPI, CONFIG_ACPI_SPCR_TABLE may be
* defined to parse ACPI SPCR table. As a result of the parsing preferred
- * console is registered and if @earlycon is true, earlycon is set up.
+ * console is registered and if @enable_earlycon is true, earlycon is set up.
+ * If @enable_console is true the system console is also configured.
*
* When CONFIG_ACPI_SPCR_TABLE is defined, this function should be called
* from arch initialization code as soon as the DT/ACPI decision is made.
*
*/
-int __init parse_spcr(bool earlycon)
+int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
{
static char opts[64];
struct acpi_table_spcr *table;
@@ -105,11 +107,8 @@ int __init parse_spcr(bool earlycon)
if (ACPI_FAILURE(status))
return -ENOENT;
- if (table->header.revision < 2) {
- err = -ENOENT;
- pr_err("wrong table version\n");
- goto done;
- }
+ if (table->header.revision < 2)
+ pr_info("SPCR table version %d\n", table->header.revision);
if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
switch (ACPI_ACCESS_BIT_WIDTH((
@@ -185,7 +184,7 @@ int __init parse_spcr(bool earlycon)
*/
if (qdf2400_erratum_44_present(&table->header)) {
qdf2400_e44_present = true;
- if (earlycon)
+ if (enable_earlycon)
uart = "qdf2400_e44";
}
@@ -205,11 +204,13 @@ int __init parse_spcr(bool earlycon)
pr_info("console: %s\n", opts);
- if (earlycon)
+ if (enable_earlycon)
setup_earlycon(opts);
- err = add_preferred_console(uart, 0, opts + strlen(uart) + 1);
-
+ if (enable_console)
+ err = add_preferred_console(uart, 0, opts + strlen(uart) + 1);
+ else
+ err = 0;
done:
acpi_put_table((struct acpi_table_header *)table);
return err;
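With the rename and the extra parameter, callers now state explicitly whether they want an earlycon, a preferred console, both, or neither. On arm64, for instance, the call site ends up looking roughly like this (a sketch; the actual guard conditions in the arch code may differ):

/* arch init, once the DT-vs-ACPI decision is made: */
if (acpi_disabled)
        return;
acpi_parse_spcr(earlycon_acpi_spcr_enable,  /* set up earlycon? */
                true);                      /* register preferred console */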
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 80ce2a7d224b..7bcb66ccccf3 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -456,7 +456,8 @@ static const char * const table_sigs[] = {
ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
- ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
+ ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, ACPI_SIG_IORT,
+ NULL };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
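table_sigs[] is a NULL-terminated array of 4-byte ACPI signatures, so supporting IORT table upgrades only requires slotting the new signature in before the sentinel. A self-contained sketch of how such a list is scanned (illustrative, not the kernel's exact code; ACPI_NAME_SIZE is the standard 4-byte signature length):

#include <stddef.h>
#include <string.h>

#define ACPI_NAME_SIZE 4

/* Return the index of sig in a NULL-terminated signature list, or -1. */
static int find_table_sig(const char *const sigs[], const char *sig)
{
        for (size_t i = 0; sigs[i] != NULL; i++)
                if (memcmp(sigs[i], sig, ACPI_NAME_SIZE) == 0)
                        return (int)i;
        return -1;
}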
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index e58538c29377..29f102dcfec4 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -738,13 +738,13 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
#else
/* this is pretty, but avoids _divdu3 and is mostly correct */
mult = he_dev->atm_dev->link_rate / ATM_OC3_PCR;
- if (rate_cps > (272 * mult))
+ if (rate_cps > (272ULL * mult))
buf = 4;
- else if (rate_cps > (204 * mult))
+ else if (rate_cps > (204ULL * mult))
buf = 3;
- else if (rate_cps > (136 * mult))
+ else if (rate_cps > (136ULL * mult))
buf = 2;
- else if (rate_cps > (68 * mult))
+ else if (rate_cps > (68ULL * mult))
buf = 1;
else
buf = 0;
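This hunk and the NUM_RETRIES change in cppc_acpi.c above rely on the same C rule: in an expression like 272 * mult, if mult has a 32-bit type the multiplication is performed in 32 bits and can wrap before the result is widened for the comparison against a 64-bit rate_cps; writing 272ULL forces the whole expression into unsigned long long. A small self-contained demonstration (the value of mult is chosen purely to make the wraparound visible):

#include <stdio.h>

int main(void)
{
        unsigned int mult = 20000000;          /* large enough to overflow */
        unsigned long long a = 272 * mult;     /* 32-bit multiply, wraps */
        unsigned long long b = 272ULL * mult;  /* 64-bit multiply, exact */

        printf("wrapped: %llu  exact: %llu\n", a, b);
        return 0;
}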
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 528b24149bc7..1ea0e2502e8e 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2290,6 +2290,38 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
return 0;
}
+static int genpd_iterate_idle_states(struct device_node *dn,
+ struct genpd_power_state *states)
+{
+ int ret;
+ struct of_phandle_iterator it;
+ struct device_node *np;
+ int i = 0;
+
+ ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
+ if (ret <= 0)
+ return ret;
+
+	/* Loop over the phandles until all the requested entries are found */
+ of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
+ np = it.node;
+ if (!of_match_node(idle_state_match, np))
+ continue;
+ if (states) {
+ ret = genpd_parse_state(&states[i], np);
+ if (ret) {
+ pr_err("Parsing idle state node %pOF failed with err %d\n",
+ np, ret);
+ of_node_put(np);
+ return ret;
+ }
+ }
+ i++;
+ }
+
+ return i;
+}
+
/**
* of_genpd_parse_idle_states: Return array of idle states for the genpd.
*
@@ -2299,49 +2331,31 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
*
* Returns the device states parsed from the OF node. The memory for the states
* is allocated by this function and is the responsibility of the caller to
- * free the memory after use.
+ * free the memory after use. If no domain idle states are found, it returns
+ * -EINVAL; in case of other errors, a negative error code.
*/
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n)
{
struct genpd_power_state *st;
- struct device_node *np;
- int i = 0;
- int err, ret;
- int count;
- struct of_phandle_iterator it;
- const struct of_device_id *match_id;
+ int ret;
- count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
- if (count <= 0)
- return -EINVAL;
+ ret = genpd_iterate_idle_states(dn, NULL);
+ if (ret <= 0)
+ return ret < 0 ? ret : -EINVAL;
- st = kcalloc(count, sizeof(*st), GFP_KERNEL);
+ st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
- /* Loop over the phandles until all the requested entry is found */
- of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
- np = it.node;
- match_id = of_match_node(idle_state_match, np);
- if (!match_id)
- continue;
- ret = genpd_parse_state(&st[i++], np);
- if (ret) {
- pr_err
- ("Parsing idle state node %pOF failed with err %d\n",
- np, ret);
- of_node_put(np);
- kfree(st);
- return ret;
- }
+ ret = genpd_iterate_idle_states(dn, st);
+ if (ret <= 0) {
+ kfree(st);
+ return ret < 0 ? ret : -EINVAL;
}
- *n = i;
- if (!i)
- kfree(st);
- else
- *states = st;
+ *states = st;
+ *n = ret;
return 0;
}
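The refactoring above is the classic count-then-fill idiom: genpd_iterate_idle_states() is called once with a NULL buffer purely to count matching nodes, then again to populate the allocation, so the walk logic exists in one place. A generic, compilable model of the idiom (all names hypothetical; the odd/even filter stands in for of_match_node()):

#include <stdlib.h>

/* Walk a source array; if out is NULL just count, otherwise fill. */
static int iterate(const int *src, int n, int *out)
{
        int i, found = 0;

        for (i = 0; i < n; i++) {
                if (src[i] % 2)            /* stand-in for the match test */
                        continue;
                if (out)
                        out[found] = src[i];
                found++;
        }
        return found;
}

static int collect(const int *src, int n, int **out, int *count)
{
        int ret = iterate(src, n, NULL);   /* pass 1: count only */
        int *buf;

        if (ret <= 0)
                return ret < 0 ? ret : -1;
        buf = calloc(ret, sizeof(*buf));
        if (!buf)
                return -1;
        iterate(src, n, buf);              /* pass 2: fill */
        *out = buf;
        *count = ret;
        return 0;
}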
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index cc93522a6d41..8e40da093766 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -124,11 +124,13 @@ static int atomic_dec_return_safe(atomic_t *v)
#define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
#define RBD_FEATURE_DATA_POOL (1ULL<<7)
+#define RBD_FEATURE_OPERATIONS (1ULL<<8)
#define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
RBD_FEATURE_STRIPINGV2 | \
RBD_FEATURE_EXCLUSIVE_LOCK | \
- RBD_FEATURE_DATA_POOL)
+ RBD_FEATURE_DATA_POOL | \
+ RBD_FEATURE_OPERATIONS)
/* Features supported by this (client software) implementation. */
@@ -281,7 +283,6 @@ struct rbd_obj_request {
int result;
rbd_obj_callback_t callback;
- struct completion completion;
struct kref kref;
};
@@ -1734,10 +1735,7 @@ static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p cb %p\n", __func__, obj_request,
obj_request->callback);
- if (obj_request->callback)
- obj_request->callback(obj_request);
- else
- complete_all(&obj_request->completion);
+ obj_request->callback(obj_request);
}
static void rbd_obj_request_error(struct rbd_obj_request *obj_request, int err)
@@ -2013,7 +2011,6 @@ rbd_obj_request_create(enum obj_request_type type)
obj_request->which = BAD_WHICH;
obj_request->type = type;
INIT_LIST_HEAD(&obj_request->links);
- init_completion(&obj_request->completion);
kref_init(&obj_request->kref);
dout("%s %p\n", __func__, obj_request);
@@ -2129,15 +2126,13 @@ static struct rbd_img_request *rbd_img_request_create(
{
struct rbd_img_request *img_request;
- img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
+ img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
if (!img_request)
return NULL;
- img_request->rq = NULL;
img_request->rbd_dev = rbd_dev;
img_request->offset = offset;
img_request->length = length;
- img_request->flags = 0;
if (op_type == OBJ_OP_DISCARD) {
img_request_discard_set(img_request);
img_request->snapc = snapc;
@@ -2149,11 +2144,8 @@ static struct rbd_img_request *rbd_img_request_create(
}
if (rbd_dev_parent_get(rbd_dev))
img_request_layered_set(img_request);
+
spin_lock_init(&img_request->completion_lock);
- img_request->next_completion = 0;
- img_request->callback = NULL;
- img_request->result = 0;
- img_request->obj_request_count = 0;
INIT_LIST_HEAD(&img_request->obj_requests);
kref_init(&img_request->kref);
@@ -2692,8 +2684,6 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
parent_request->copyup_pages = NULL;
parent_request->copyup_page_count = 0;
- parent_request->obj_request = NULL;
- rbd_obj_request_put(obj_request);
out_err:
if (pages)
ceph_release_page_vector(pages, page_count);
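Switching rbd_img_request_create() to kmem_cache_zalloc() is what lets all the explicit "= NULL" and "= 0" member assignments be deleted: the object comes back fully zeroed, so only non-zero initialization remains. A hedged kernel-style sketch of the pattern (struct img_req is hypothetical):

struct img_req {
        void *rq;
        unsigned int flags;
        int result;
};

static struct img_req *img_req_create(struct kmem_cache *cache)
{
        /* GFP_NOIO: this can be reached from the block I/O path */
        struct img_req *r = kmem_cache_zalloc(cache, GFP_NOIO);

        if (!r)
                return NULL;
        /* no r->rq = NULL, r->flags = 0, r->result = 0: already zeroed */
        return r;
}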
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 79908e6ddbf2..4a07593c2efd 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -373,14 +373,12 @@ static ssize_t virtblk_serial_show(struct device *dev,
static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
-static void virtblk_config_changed_work(struct work_struct *work)
+/* The queue's logical block size must be set before calling this */
+static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
- struct virtio_blk *vblk =
- container_of(work, struct virtio_blk, config_work);
struct virtio_device *vdev = vblk->vdev;
struct request_queue *q = vblk->disk->queue;
char cap_str_2[10], cap_str_10[10];
- char *envp[] = { "RESIZE=1", NULL };
unsigned long long nblocks;
u64 capacity;
@@ -402,13 +400,24 @@ static void virtblk_config_changed_work(struct work_struct *work)
STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
dev_notice(&vdev->dev,
- "new size: %llu %d-byte logical blocks (%s/%s)\n",
+ "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
+ vblk->disk->disk_name,
+ resize ? "new size: " : "",
nblocks,
queue_logical_block_size(q),
cap_str_10,
cap_str_2);
set_capacity(vblk->disk, capacity);
+}
+
+static void virtblk_config_changed_work(struct work_struct *work)
+{
+ struct virtio_blk *vblk =
+ container_of(work, struct virtio_blk, config_work);
+ char *envp[] = { "RESIZE=1", NULL };
+
+ virtblk_update_capacity(vblk, true);
revalidate_disk(vblk->disk);
kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}
@@ -621,7 +630,6 @@ static int virtblk_probe(struct virtio_device *vdev)
struct request_queue *q;
int err, index;
- u64 cap;
u32 v, blk_size, sg_elems, opt_io_size;
u16 min_io_size;
u8 physical_block_exp, alignment_offset;
@@ -719,17 +727,6 @@ static int virtblk_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
set_disk_ro(vblk->disk, 1);
- /* Host must always specify the capacity. */
- virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);
-
- /* If capacity is too big, truncate with warning. */
- if ((sector_t)cap != cap) {
- dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
- (unsigned long long)cap);
- cap = (sector_t)-1;
- }
- set_capacity(vblk->disk, cap);
-
/* We can handle whatever the host told us to handle. */
blk_queue_max_segments(q, vblk->sg_elems-2);
@@ -780,6 +777,7 @@ static int virtblk_probe(struct virtio_device *vdev)
if (!err && opt_io_size)
blk_queue_io_opt(q, blk_size * opt_io_size);
+ virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
device_add_disk(&vdev->dev, vblk->disk);
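The capacity read and the truncation guard deleted from virtblk_probe() move into virtblk_update_capacity(), so probe and config-change now share one code path. The "(sector_t)cap != cap" round-trip cast is the standard check for a value that may not fit when sector_t is 32 bits wide; a self-contained demonstration, assuming a 32-bit sector_t for effect:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t sector_t;      /* assume 32-bit sector_t for the demo */

int main(void)
{
        uint64_t cap = 1ULL << 33;          /* 2^33 sectors of 512 bytes */

        if ((sector_t)cap != cap) {         /* round-trip cast detects loss */
                printf("capacity %llu too large: truncating\n",
                       (unsigned long long)cap);
                cap = (sector_t)-1;
        }
        printf("usable capacity: %llu\n", (unsigned long long)cap);
        return 0;
}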
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index d8addbce40bc..608af20a3494 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -239,16 +239,6 @@ if PPC32 || PPC64
source "drivers/cpufreq/Kconfig.powerpc"
endif
-if AVR32
-config AVR32_AT32AP_CPUFREQ
- bool "CPU frequency driver for AT32AP"
- depends on PLATFORM_AT32AP
- default n
- help
- This enables the CPU frequency driver for AT32AP processors.
- If in doubt, say N.
-endif
-
if IA64
config IA64_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index e07715ce8844..c60c1e141d9d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -100,7 +100,6 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o
##################################################################################
# Other platform drivers
-obj-$(CONFIG_AVR32_AT32AP_CPUFREQ) += at32ap-cpufreq.o
obj-$(CONFIG_BFIN_CPU_FREQ) += blackfin-cpufreq.o
obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o
obj-$(CONFIG_CRIS_MACH_ARTPEC3) += cris-artpec3-cpufreq.o
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index 042023bbbf62..be926d9a66e5 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/pci.h>
#include <linux/percpu-defs.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
@@ -109,12 +110,18 @@ out:
static int __init amd_freq_sensitivity_init(void)
{
u64 val;
+ struct pci_dev *pcidev;
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return -ENODEV;
- if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
- return -ENODEV;
+ pcidev = pci_get_device(PCI_VENDOR_ID_AMD,
+ PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, NULL);
+
+ if (!pcidev) {
+ if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+ return -ENODEV;
+ }
if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
return -ENODEV;
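The hunk uses pci_get_device() as a presence probe: if the KERNCZ SMBus function exists, the CPUID feature check is skipped. Worth noting that pci_get_device() returns a referenced struct pci_dev; a pure presence probe would normally balance that with pci_dev_put(), as in this hedged sketch:

#include <linux/pci.h>

static bool kerncz_smbus_present(void)
{
        struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_AMD,
                                              PCI_DEVICE_ID_AMD_KERNCZ_SMBUS,
                                              NULL);
        if (!pdev)
                return false;
        pci_dev_put(pdev);      /* probe only: drop the reference */
        return true;
}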
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
deleted file mode 100644
index 7b612c8bb09e..000000000000
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2004-2007 Atmel Corporation
- *
- * Based on MIPS implementation arch/mips/kernel/time.c
- * Copyright 2001 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*#define DEBUG*/
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-
-static struct cpufreq_frequency_table *freq_table;
-
-static unsigned int ref_freq;
-static unsigned long loops_per_jiffy_ref;
-
-static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
-{
- unsigned int old_freq, new_freq;
-
- old_freq = policy->cur;
- new_freq = freq_table[index].frequency;
-
- if (!ref_freq) {
- ref_freq = old_freq;
- loops_per_jiffy_ref = boot_cpu_data.loops_per_jiffy;
- }
-
- if (old_freq < new_freq)
- boot_cpu_data.loops_per_jiffy = cpufreq_scale(
- loops_per_jiffy_ref, ref_freq, new_freq);
- clk_set_rate(policy->clk, new_freq * 1000);
- if (new_freq < old_freq)
- boot_cpu_data.loops_per_jiffy = cpufreq_scale(
- loops_per_jiffy_ref, ref_freq, new_freq);
-
- return 0;
-}
-
-static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
-{
- unsigned int frequency, rate, min_freq;
- struct clk *cpuclk;
- int retval, steps, i;
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- cpuclk = clk_get(NULL, "cpu");
- if (IS_ERR(cpuclk)) {
- pr_debug("cpufreq: could not get CPU clk\n");
- retval = PTR_ERR(cpuclk);
- goto out_err;
- }
-
- min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
- frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
- policy->cpuinfo.transition_latency = 0;
-
- /*
- * AVR32 CPU frequency rate scales in power of two between maximum and
- * minimum, also add space for the table end marker.
- *
- * Further validate that the frequency is usable, and append it to the
- * frequency table.
- */
- steps = fls(frequency / min_freq) + 1;
- freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table),
- GFP_KERNEL);
- if (!freq_table) {
- retval = -ENOMEM;
- goto out_err_put_clk;
- }
-
- for (i = 0; i < (steps - 1); i++) {
- rate = clk_round_rate(cpuclk, frequency * 1000) / 1000;
-
- if (rate != frequency)
- freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
- else
- freq_table[i].frequency = frequency;
-
- frequency /= 2;
- }
-
- policy->clk = cpuclk;
- freq_table[steps - 1].frequency = CPUFREQ_TABLE_END;
-
- retval = cpufreq_table_validate_and_show(policy, freq_table);
- if (!retval) {
- printk("cpufreq: AT32AP CPU frequency driver\n");
- return 0;
- }
-
- kfree(freq_table);
-out_err_put_clk:
- clk_put(cpuclk);
-out_err:
- return retval;
-}
-
-static struct cpufreq_driver at32_driver = {
- .name = "at32ap",
- .init = at32_cpufreq_driver_init,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = at32_set_target,
- .get = cpufreq_generic_get,
- .flags = CPUFREQ_STICKY,
-};
-
-static int __init at32_cpufreq_init(void)
-{
- return cpufreq_register_driver(&at32_driver);
-}
-late_initcall(at32_cpufreq_init);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 421f318c0e66..de33ebf008ad 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1686,6 +1686,9 @@ void cpufreq_resume(void)
if (!cpufreq_driver)
return;
+ if (unlikely(!cpufreq_suspended))
+ return;
+
cpufreq_suspended = false;
if (!has_target() && !cpufreq_driver->resume)
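The added check makes cpufreq_resume() a no-op unless a suspend actually happened, guarding against unbalanced resume calls (as can occur in some hibernation error paths) that would otherwise restart governors that were never stopped. The guard in isolation, as a compilable model:

#include <stdbool.h>

static bool suspended;

void subsys_suspend(void)
{
        suspended = true;
        /* ... stop governors, quiesce hardware ... */
}

void subsys_resume(void)
{
        if (!suspended)         /* resume without a prior suspend: no-op */
                return;
        suspended = false;
        /* ... restart governors ... */
}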
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index b6b369c22272..932caa386ece 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -115,10 +115,10 @@ static struct cpufreq_freqs freqs;
static int init_div_table(void)
{
struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table;
- unsigned int tmp, clk_div, ema_div, freq, volt_id;
+ unsigned int tmp, clk_div, ema_div, freq, volt_id, idx;
struct dev_pm_opp *opp;
- cpufreq_for_each_entry(pos, freq_tbl) {
+ cpufreq_for_each_entry_idx(pos, freq_tbl, idx) {
opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
pos->frequency * 1000, true);
if (IS_ERR(opp)) {
@@ -154,8 +154,7 @@ static int init_div_table(void)
tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
| ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));
- __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 *
- (pos - freq_tbl));
+ __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * idx);
dev_pm_opp_put(opp);
}
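This hunk, and the freq_table.c, longhaul.c and pasemi-cpufreq.c hunks that follow, all retire the "pos - table" pointer-subtraction trick in favour of an index carried by the iteration macro itself; that keeps the index correct even if the macro is later changed to iterate over something other than a flat array. A self-contained model of an idx-carrying macro, simplified from the cpufreq_for_each_entry_idx() idea:

#include <stdio.h>

struct freq_entry { unsigned int frequency; };
#define TABLE_END 0xffffffffu

/* Iterate entries while also maintaining a logical index. */
#define for_each_entry_idx(pos, table, idx) \
        for (pos = table, idx = 0; pos->frequency != TABLE_END; pos++, idx++)

int main(void)
{
        struct freq_entry tbl[] = { {1000000}, {800000}, {600000}, {TABLE_END} };
        struct freq_entry *pos;
        int idx;

        for_each_entry_idx(pos, tbl, idx)
                printf("%d: %u kHz\n", idx, pos->frequency);
        return 0;
}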
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 3bbbf9e6960c..6d007f824ca7 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -143,10 +143,9 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
break;
}
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
- i = pos - table;
if ((freq < policy->min) || (freq > policy->max))
continue;
if (freq == target_freq) {
@@ -211,15 +210,16 @@ int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
unsigned int freq)
{
struct cpufreq_frequency_table *pos, *table = policy->freq_table;
+ int idx;
if (unlikely(!table)) {
pr_debug("%s: Unable to find frequency table\n", __func__);
return -ENOENT;
}
- cpufreq_for_each_valid_entry(pos, table)
+ cpufreq_for_each_valid_entry_idx(pos, table, idx)
if (pos->frequency == freq)
- return pos - table;
+ return idx;
return -EINVAL;
}
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index 741f22e5cee3..ff67859948b3 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -504,6 +504,7 @@ static struct platform_driver imx6q_cpufreq_platdrv = {
};
module_platform_driver(imx6q_cpufreq_platdrv);
+MODULE_ALIAS("platform:imx6q-cpufreq");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7edf7a0e5a96..6d084c61ee25 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -779,6 +779,8 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
return 0;
}
+static void intel_pstate_hwp_enable(struct cpudata *cpudata);
+
static int intel_pstate_resume(struct cpufreq_policy *policy)
{
if (!hwp_active)
@@ -786,6 +788,9 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
mutex_lock(&intel_pstate_limits_lock);
+ if (policy->cpu == 0)
+ intel_pstate_hwp_enable(all_cpu_data[policy->cpu]);
+
all_cpu_data[policy->cpu]->epp_policy = 0;
intel_pstate_hwp_set(policy->cpu);
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 5faa37c5b091..942632a27b50 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -600,7 +600,7 @@ static void longhaul_setup_voltagescaling(void)
/* Calculate kHz for one voltage step */
kHz_step = (highest_speed - min_vid_speed) / numvscales;
- cpufreq_for_each_entry(freq_pos, longhaul_table) {
+ cpufreq_for_each_entry_idx(freq_pos, longhaul_table, j) {
speed = freq_pos->frequency;
if (speed > min_vid_speed)
pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
@@ -609,7 +609,7 @@ static void longhaul_setup_voltagescaling(void)
freq_pos->driver_data |= mV_vrm_table[pos] << 8;
vid = vrm_mV_table[mV_vrm_table[pos]];
pr_info("f: %d kHz, index: %d, vid: %d mV\n",
- speed, (int)(freq_pos - longhaul_table), vid.mV);
+ speed, j, vid.mV);
}
can_scale_voltage = 1;
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index b257fc7d5204..75dfbd2a58ea 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -139,7 +139,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
struct cpufreq_frequency_table *pos;
const u32 *max_freqp;
u32 max_freq;
- int cur_astate;
+ int cur_astate, idx;
struct resource res;
struct device_node *cpu, *dn;
int err = -ENODEV;
@@ -198,9 +198,9 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_debug("initializing frequency table\n");
/* initialize frequency table */
- cpufreq_for_each_entry(pos, pas_freqs) {
+ cpufreq_for_each_entry_idx(pos, pas_freqs, idx) {
pos->frequency = get_astate_freq(pos->driver_data) * 100000;
- pr_debug("%d: %d\n", (int)(pos - pas_freqs), pos->frequency);
+ pr_debug("%d: %d\n", idx, pos->frequency);
}
cur_astate = get_cur_astate(policy->cpu);
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 247fcbfa4cb5..c32a833e1b00 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -145,6 +145,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
if (IS_ERR(priv->clk)) {
dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n",
__func__, cpu_dev->id);
+ ret = PTR_ERR(priv->clk);
goto out_free_cpufreq_table;
}
@@ -197,11 +198,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
static void scpi_cpufreq_ready(struct cpufreq_policy *policy)
{
struct scpi_data *priv = policy->driver_data;
- struct thermal_cooling_device *cdev;
- cdev = of_cpufreq_cooling_register(policy);
- if (!IS_ERR(cdev))
- priv->cdev = cdev;
+ priv->cdev = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver scpi_cpufreq_driver = {
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 6d626606b9c5..b9dfae47aefd 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -1,5 +1,6 @@
config CRYPTO_DEV_CCP_DD
tristate "Secure Processor device driver"
+ depends on CPU_SUP_AMD || ARM64
default m
help
Provides AMD Secure Processor device driver.
@@ -32,3 +33,14 @@ config CRYPTO_DEV_CCP_CRYPTO
Support for using the cryptographic API with the AMD Cryptographic
Coprocessor. This module supports offload of SHA and AES algorithms.
If you choose 'M' here, this module will be called ccp_crypto.
+
+config CRYPTO_DEV_SP_PSP
+ bool "Platform Security Processor (PSP) device"
+ default y
+ depends on CRYPTO_DEV_CCP_DD && X86_64
+ help
+ Provide support for the AMD Platform Security Processor (PSP).
+ The PSP is a dedicated processor that provides support for key
+ management commands in Secure Encrypted Virtualization (SEV) mode,
+ along with software-based Trusted Execution Environment (TEE) to
+ enable third-party trusted applications.
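
For context, a hedged userspace sketch of the interface this option enables: the /dev/sev node and SEV_ISSUE_CMD ioctl are implemented in psp-dev.c below, and the uapi structures are assumed to come from <linux/psp-sev.h>:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/psp-sev.h>

int main(void)
{
	struct sev_user_data_status status = { 0 };
	struct sev_issue_cmd arg = {
		.cmd  = SEV_PLATFORM_STATUS,
		.data = (__u64)(unsigned long)&status,
	};
	int fd = open("/dev/sev", O_RDWR);

	if (fd < 0)
		return 1;

	/* On success the driver copies the status block back to us;
	 * on failure arg.error carries the firmware status code. */
	if (ioctl(fd, SEV_ISSUE_CMD, &arg) == 0)
		printf("SEV API %u.%u build %u\n",
		       status.api_major, status.api_minor, status.build);
	else
		fprintf(stderr, "SEV command failed, fw error %#x\n", arg.error);

	close(fd);
	return 0;
}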
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index c4ce726b931e..51d1c0cf66c7 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -8,6 +8,7 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
ccp-dmaengine.o \
ccp-debugfs.o
ccp-$(CONFIG_PCI) += sp-pci.o
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
new file mode 100644
index 000000000000..fcfa5b1eae61
--- /dev/null
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -0,0 +1,805 @@
+/*
+ * AMD Platform Security Processor (PSP) interface
+ *
+ * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/hw_random.h>
+#include <linux/ccp.h>
+
+#include "sp-dev.h"
+#include "psp-dev.h"
+
+#define DEVICE_NAME "sev"
+
+static DEFINE_MUTEX(sev_cmd_mutex);
+static struct sev_misc_dev *misc_dev;
+static struct psp_device *psp_master;
+
+static struct psp_device *psp_alloc_struct(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ struct psp_device *psp;
+
+ psp = devm_kzalloc(dev, sizeof(*psp), GFP_KERNEL);
+ if (!psp)
+ return NULL;
+
+ psp->dev = dev;
+ psp->sp = sp;
+
+ snprintf(psp->name, sizeof(psp->name), "psp-%u", sp->ord);
+
+ return psp;
+}
+
+static irqreturn_t psp_irq_handler(int irq, void *data)
+{
+ struct psp_device *psp = data;
+ unsigned int status;
+ int reg;
+
+ /* Read the interrupt status: */
+ status = ioread32(psp->io_regs + PSP_P2CMSG_INTSTS);
+
+ /* Check if it is command completion: */
+ if (!(status & BIT(PSP_CMD_COMPLETE_REG)))
+ goto done;
+
+ /* Check if it is SEV command completion: */
+ reg = ioread32(psp->io_regs + PSP_CMDRESP);
+ if (reg & PSP_CMDRESP_RESP) {
+ psp->sev_int_rcvd = 1;
+ wake_up(&psp->sev_int_queue);
+ }
+
+done:
+ /* Clear the interrupt status by writing the same value we read. */
+ iowrite32(status, psp->io_regs + PSP_P2CMSG_INTSTS);
+
+ return IRQ_HANDLED;
+}
+
+static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
+{
+ psp->sev_int_rcvd = 0;
+
+ wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
+ *reg = ioread32(psp->io_regs + PSP_CMDRESP);
+}
+
+static int sev_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_INIT: return sizeof(struct sev_data_init);
+ case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
+ case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
+ case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
+ case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
+ case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
+ case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
+ case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
+ case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
+ case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
+ case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
+ case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
+ case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
+ case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
+ case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
+ case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
+ case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
+ case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
+ case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
+ case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
+ case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
+ case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
+ case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
+ case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
+ default: return 0;
+ }
+}
+
+static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+{
+ struct psp_device *psp = psp_master;
+ unsigned int phys_lsb, phys_msb;
+ unsigned int reg;
+ int ret = 0;
+
+ if (!psp)
+ return -ENODEV;
+
+ /* Get the physical address of the command buffer */
+ phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
+ phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
+
+ dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n",
+ cmd, phys_msb, phys_lsb);
+
+ print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
+ iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
+
+ reg = cmd;
+ reg <<= PSP_CMDRESP_CMD_SHIFT;
+ reg |= PSP_CMDRESP_IOC;
+ iowrite32(reg, psp->io_regs + PSP_CMDRESP);
+
+ /* wait for command completion */
+ sev_wait_cmd_ioc(psp, &reg);
+
+ if (psp_ret)
+ *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
+
+ if (reg & PSP_CMDRESP_ERR_MASK) {
+ dev_dbg(psp->dev, "sev command %#x failed (%#010x)\n",
+ cmd, reg & PSP_CMDRESP_ERR_MASK);
+ ret = -EIO;
+ }
+
+ print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
+ sev_cmd_buffer_len(cmd), false);
+
+ return ret;
+}
+
+static int sev_do_cmd(int cmd, void *data, int *psp_ret)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_do_cmd_locked(cmd, data, psp_ret);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int __sev_platform_init_locked(int *error)
+{
+ struct psp_device *psp = psp_master;
+ int rc = 0;
+
+ if (!psp)
+ return -ENODEV;
+
+ if (psp->sev_state == SEV_STATE_INIT)
+ return 0;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_INIT, &psp->init_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ psp->sev_state = SEV_STATE_INIT;
+ dev_dbg(psp->dev, "SEV firmware initialized\n");
+
+ return rc;
+}
+
+int sev_platform_init(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_init_locked(error);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(sev_platform_init);
+
+static int __sev_platform_shutdown_locked(int *error)
+{
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
+ if (ret)
+ return ret;
+
+ psp_master->sev_state = SEV_STATE_UNINIT;
+ dev_dbg(psp_master->dev, "SEV firmware shutdown\n");
+
+ return ret;
+}
+
+static int sev_platform_shutdown(int *error)
+{
+ int rc;
+
+ mutex_lock(&sev_cmd_mutex);
+ rc = __sev_platform_shutdown_locked(error);
+ mutex_unlock(&sev_cmd_mutex);
+
+ return rc;
+}
+
+static int sev_get_platform_state(int *state, int *error)
+{
+ int rc;
+
+ rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
+ &psp_master->status_cmd_buf, error);
+ if (rc)
+ return rc;
+
+ *state = psp_master->status_cmd_buf.state;
+ return rc;
+}
+
+static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+{
+ int state, rc;
+
+ /*
+ * The SEV spec requires that FACTORY_RESET must be issued in
+ * UNINIT state. Before we go further, let's check if any guest is
+ * active.
+ *
+ * If the FW is in the WORKING state then deny the request; otherwise
+ * issue a SHUTDOWN command (INIT -> UNINIT) before issuing the
+ * FACTORY_RESET.
+ */
+ rc = sev_get_platform_state(&state, &argp->error);
+ if (rc)
+ return rc;
+
+ if (state == SEV_STATE_WORKING)
+ return -EBUSY;
+
+ if (state == SEV_STATE_INIT) {
+ rc = __sev_platform_shutdown_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_status *data = &psp_master->status_cmd_buf;
+ int ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
+ if (ret)
+ return ret;
+
+ if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
+{
+ int rc;
+
+ if (psp_master->sev_state == SEV_STATE_UNINIT) {
+ rc = __sev_platform_init_locked(&argp->error);
+ if (rc)
+ return rc;
+ }
+
+ return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+}
+
+static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_pek_csr input;
+ struct sev_data_pek_csr *data;
+ void *blob = NULL;
+ int ret;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* userspace wants to query CSR length */
+ if (!input.address || !input.length)
+ goto cmd;
+
+ /* allocate a physically contiguous buffer to store the CSR blob */
+ if (!access_ok(VERIFY_WRITE, input.address, input.length) ||
+ input.length > SEV_FW_BLOB_MAX_SIZE) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ blob = kmalloc(input.length, GFP_KERNEL);
+ if (!blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->address = __psp_pa(blob);
+ data->len = input.length;
+
+cmd:
+ if (psp_master->sev_state == SEV_STATE_UNINIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_blob;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
+
+ /* If we were querying the CSR length, the FW returned it in data->len. */
+ input.length = data->len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_blob;
+ }
+
+ if (blob) {
+ if (copy_to_user((void __user *)input.address, blob, input.length))
+ ret = -EFAULT;
+ }
+
+e_free_blob:
+ kfree(blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+{
+ void *data;
+
+ if (!uaddr || !len)
+ return ERR_PTR(-EINVAL);
+
+ /* verify that blob length does not exceed our limit */
+ if (len > SEV_FW_BLOB_MAX_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ data = kmalloc(len, GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(data, (void __user *)(uintptr_t)uaddr, len))
+ goto e_free;
+
+ return data;
+
+e_free:
+ kfree(data);
+ return ERR_PTR(-EFAULT);
+}
+EXPORT_SYMBOL_GPL(psp_copy_user_blob);
+
+static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_pek_cert_import input;
+ struct sev_data_pek_cert_import *data;
+ void *pek_blob, *oca_blob;
+ int ret;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* copy the PEK certificate blob from userspace */
+ pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
+ if (IS_ERR(pek_blob)) {
+ ret = PTR_ERR(pek_blob);
+ goto e_free;
+ }
+
+ data->pek_cert_address = __psp_pa(pek_blob);
+ data->pek_cert_len = input.pek_cert_len;
+
+ /* copy the OCA certificate blob from userspace */
+ oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
+ if (IS_ERR(oca_blob)) {
+ ret = PTR_ERR(oca_blob);
+ goto e_free_pek;
+ }
+
+ data->oca_cert_address = __psp_pa(oca_blob);
+ data->oca_cert_len = input.oca_cert_len;
+
+ /* If platform is not in INIT state then transition it to INIT */
+ if (psp_master->sev_state != SEV_STATE_INIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_oca;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
+
+e_free_oca:
+ kfree(oca_blob);
+e_free_pek:
+ kfree(pek_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+{
+ struct sev_user_data_pdh_cert_export input;
+ void *pdh_blob = NULL, *cert_blob = NULL;
+ struct sev_data_pdh_cert_export *data;
+ int ret;
+
+ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Userspace wants to query the certificate length. */
+ if (!input.pdh_cert_address ||
+ !input.pdh_cert_len ||
+ !input.cert_chain_address)
+ goto cmd;
+
+ /* Allocate a physically contiguous buffer to store the PDH blob. */
+ if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(VERIFY_WRITE, input.pdh_cert_address, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ /* Allocate a physically contiguous buffer to store the cert chain blob. */
+ if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
+ !access_ok(VERIFY_WRITE, input.cert_chain_address, input.cert_chain_len)) {
+ ret = -EFAULT;
+ goto e_free;
+ }
+
+ pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
+ if (!pdh_blob) {
+ ret = -ENOMEM;
+ goto e_free;
+ }
+
+ data->pdh_cert_address = __psp_pa(pdh_blob);
+ data->pdh_cert_len = input.pdh_cert_len;
+
+ cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
+ if (!cert_blob) {
+ ret = -ENOMEM;
+ goto e_free_pdh;
+ }
+
+ data->cert_chain_address = __psp_pa(cert_blob);
+ data->cert_chain_len = input.cert_chain_len;
+
+cmd:
+ /* If platform is not in INIT state then transition it to INIT. */
+ if (psp_master->sev_state != SEV_STATE_INIT) {
+ ret = __sev_platform_init_locked(&argp->error);
+ if (ret)
+ goto e_free_cert;
+ }
+
+ ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
+
+ /* If we were querying the lengths, the FW returned them in *data. */
+ input.cert_chain_len = data->cert_chain_len;
+ input.pdh_cert_len = data->pdh_cert_len;
+
+ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+
+ if (pdh_blob) {
+ if (copy_to_user((void __user *)input.pdh_cert_address,
+ pdh_blob, input.pdh_cert_len)) {
+ ret = -EFAULT;
+ goto e_free_cert;
+ }
+ }
+
+ if (cert_blob) {
+ if (copy_to_user((void __user *)input.cert_chain_address,
+ cert_blob, input.cert_chain_len))
+ ret = -EFAULT;
+ }
+
+e_free_cert:
+ kfree(cert_blob);
+e_free_pdh:
+ kfree(pdh_blob);
+e_free:
+ kfree(data);
+ return ret;
+}
+
+static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct sev_issue_cmd input;
+ int ret = -EFAULT;
+
+ if (!psp_master)
+ return -ENODEV;
+
+ if (ioctl != SEV_ISSUE_CMD)
+ return -EINVAL;
+
+ if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
+ return -EFAULT;
+
+ if (input.cmd > SEV_MAX)
+ return -EINVAL;
+
+ mutex_lock(&sev_cmd_mutex);
+
+ switch (input.cmd) {
+
+ case SEV_FACTORY_RESET:
+ ret = sev_ioctl_do_reset(&input);
+ break;
+ case SEV_PLATFORM_STATUS:
+ ret = sev_ioctl_do_platform_status(&input);
+ break;
+ case SEV_PEK_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
+ break;
+ case SEV_PDH_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
+ break;
+ case SEV_PEK_CSR:
+ ret = sev_ioctl_do_pek_csr(&input);
+ break;
+ case SEV_PEK_CERT_IMPORT:
+ ret = sev_ioctl_do_pek_import(&input);
+ break;
+ case SEV_PDH_CERT_EXPORT:
+ ret = sev_ioctl_do_pdh_export(&input);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
+ ret = -EFAULT;
+out:
+ mutex_unlock(&sev_cmd_mutex);
+
+ return ret;
+}
+
+static const struct file_operations sev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sev_ioctl,
+};
+
+int sev_platform_status(struct sev_user_data_status *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_platform_status);
+
+int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_deactivate);
+
+int sev_guest_activate(struct sev_data_activate *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_activate);
+
+int sev_guest_decommission(struct sev_data_decommission *data, int *error)
+{
+ return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_decommission);
+
+int sev_guest_df_flush(int *error)
+{
+ return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
+}
+EXPORT_SYMBOL_GPL(sev_guest_df_flush);
+
+static void sev_exit(struct kref *ref)
+{
+ struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
+
+ misc_deregister(&misc_dev->misc);
+}
+
+static int sev_misc_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ int ret;
+
+ /*
+ * SEV feature support can be detected on multiple devices, but SEV
+ * FW commands must be issued on the master device. At probe time we
+ * do not yet know which device is the master, so we create /dev/sev
+ * on the first device probe. sev_do_cmd() then routes each command
+ * to the firmware through the master device.
+ */
+ if (!misc_dev) {
+ struct miscdevice *misc;
+
+ misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
+ if (!misc_dev)
+ return -ENOMEM;
+
+ misc = &misc_dev->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = DEVICE_NAME;
+ misc->fops = &sev_fops;
+
+ ret = misc_register(misc);
+ if (ret)
+ return ret;
+
+ kref_init(&misc_dev->refcount);
+ } else {
+ kref_get(&misc_dev->refcount);
+ }
+
+ init_waitqueue_head(&psp->sev_int_queue);
+ psp->sev_misc = misc_dev;
+ dev_dbg(dev, "registered SEV device\n");
+
+ return 0;
+}
+
+static int sev_init(struct psp_device *psp)
+{
+ /* Check if device supports SEV feature */
+ if (!(ioread32(psp->io_regs + PSP_FEATURE_REG) & 1)) {
+ dev_dbg(psp->dev, "device does not support SEV\n");
+ return 1;
+ }
+
+ return sev_misc_init(psp);
+}
+
+int psp_dev_init(struct sp_device *sp)
+{
+ struct device *dev = sp->dev;
+ struct psp_device *psp;
+ int ret;
+
+ ret = -ENOMEM;
+ psp = psp_alloc_struct(sp);
+ if (!psp)
+ goto e_err;
+
+ sp->psp_data = psp;
+
+ psp->vdata = (struct psp_vdata *)sp->dev_vdata->psp_vdata;
+ if (!psp->vdata) {
+ ret = -ENODEV;
+ dev_err(dev, "missing driver data\n");
+ goto e_err;
+ }
+
+ psp->io_regs = sp->io_map + psp->vdata->offset;
+
+ /* Disable and clear interrupts until ready */
+ iowrite32(0, psp->io_regs + PSP_P2CMSG_INTEN);
+ iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTSTS);
+
+ /* Request an irq */
+ ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
+ if (ret) {
+ dev_err(dev, "psp: unable to allocate an IRQ\n");
+ goto e_err;
+ }
+
+ ret = sev_init(psp);
+ if (ret)
+ goto e_irq;
+
+ if (sp->set_psp_master_device)
+ sp->set_psp_master_device(sp);
+
+ /* Enable interrupt */
+ iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTEN);
+
+ return 0;
+
+e_irq:
+ sp_free_psp_irq(psp->sp, psp);
+e_err:
+ sp->psp_data = NULL;
+
+ dev_notice(dev, "psp initialization failed\n");
+
+ return ret;
+}
+
+void psp_dev_destroy(struct sp_device *sp)
+{
+ struct psp_device *psp = sp->psp_data;
+
+ if (psp->sev_misc)
+ kref_put(&misc_dev->refcount, sev_exit);
+
+ sp_free_psp_irq(sp, psp);
+}
+
+int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
+ void *data, int *error)
+{
+ if (!filep || filep->f_op != &sev_fops)
+ return -EBADF;
+
+ return sev_do_cmd(cmd, data, error);
+}
+EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
+
+void psp_pci_init(void)
+{
+ struct sev_user_data_status *status;
+ struct sp_device *sp;
+ int error, rc;
+
+ sp = sp_get_psp_master_device();
+ if (!sp)
+ return;
+
+ psp_master = sp->psp_data;
+
+ /* Initialize the platform */
+ rc = sev_platform_init(&error);
+ if (rc) {
+ dev_err(sp->dev, "SEV: failed to initialize, error %#x\n", error);
+ goto err;
+ }
+
+ /* Display SEV firmware version */
+ status = &psp_master->status_cmd_buf;
+ rc = sev_platform_status(status, &error);
+ if (rc) {
+ dev_err(sp->dev, "SEV: failed to get status, error %#x\n", error);
+ goto err;
+ }
+
+ dev_info(sp->dev, "SEV API:%d.%d build:%d\n", status->api_major,
+ status->api_minor, status->build);
+ return;
+
+err:
+ psp_master = NULL;
+}
+
+void psp_pci_exit(void)
+{
+ if (!psp_master)
+ return;
+
+ sev_platform_shutdown(NULL);
+}
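
A hedged sketch of how an in-kernel consumer (for example the KVM SEV code elsewhere in this series) might drive the helpers exported above; the handle/asid values and the function name are illustrative only:

/* Bind a firmware guest handle to an ASID.  sev_platform_init() is
 * effectively idempotent (a no-op once the platform has reached the
 * INIT state), so it is safe to call on every path that needs the
 * firmware initialized.
 */
static int example_sev_bind_asid(u32 handle, u32 asid)
{
	struct sev_data_activate activate;
	int error, ret;

	ret = sev_platform_init(&error);
	if (ret)
		return ret;

	activate.handle = handle;
	activate.asid   = asid;
	return sev_guest_activate(&activate, &error);
}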
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
new file mode 100644
index 000000000000..c81f0b11287a
--- /dev/null
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -0,0 +1,83 @@
+/*
+ * AMD Platform Security Processor (PSP) interface driver
+ *
+ * Copyright (C) 2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PSP_DEV_H__
+#define __PSP_DEV_H__
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/dmapool.h>
+#include <linux/hw_random.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/dmaengine.h>
+#include <linux/psp-sev.h>
+#include <linux/miscdevice.h>
+
+#include "sp-dev.h"
+
+#define PSP_C2PMSG(_num) ((_num) << 2)
+#define PSP_CMDRESP PSP_C2PMSG(32)
+#define PSP_CMDBUFF_ADDR_LO PSP_C2PMSG(56)
+#define PSP_CMDBUFF_ADDR_HI PSP_C2PMSG(57)
+#define PSP_FEATURE_REG PSP_C2PMSG(63)
+
+#define PSP_P2CMSG(_num) ((_num) << 2)
+#define PSP_CMD_COMPLETE_REG 1
+#define PSP_CMD_COMPLETE PSP_P2CMSG(PSP_CMD_COMPLETE_REG)
+
+#define PSP_P2CMSG_INTEN 0x0110
+#define PSP_P2CMSG_INTSTS 0x0114
+
+#define PSP_C2PMSG_ATTR_0 0x0118
+#define PSP_C2PMSG_ATTR_1 0x011c
+#define PSP_C2PMSG_ATTR_2 0x0120
+#define PSP_C2PMSG_ATTR_3 0x0124
+#define PSP_P2CMSG_ATTR_0 0x0128
+
+#define PSP_CMDRESP_CMD_SHIFT 16
+#define PSP_CMDRESP_IOC BIT(0)
+#define PSP_CMDRESP_RESP BIT(31)
+#define PSP_CMDRESP_ERR_MASK 0xffff
+
+#define MAX_PSP_NAME_LEN 16
+
+struct sev_misc_dev {
+ struct kref refcount;
+ struct miscdevice misc;
+};
+
+struct psp_device {
+ struct list_head entry;
+
+ struct psp_vdata *vdata;
+ char name[MAX_PSP_NAME_LEN];
+
+ struct device *dev;
+ struct sp_device *sp;
+
+ void __iomem *io_regs;
+
+ int sev_state;
+ unsigned int sev_int_rcvd;
+ wait_queue_head_t sev_int_queue;
+ struct sev_misc_dev *sev_misc;
+ struct sev_user_data_status status_cmd_buf;
+ struct sev_data_init init_cmd_buf;
+};
+
+#endif /* __PSP_DEV_H__ */
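
To make the register layout above concrete, a small illustration of how a command word is packed into PSP_CMDRESP and the firmware's answer unpacked; this mirrors __sev_do_cmd_locked() in psp-dev.c, and the helper names are illustrative:

/* Command word: the SEV command id sits above bit 16 and bit 0 (IOC)
 * asks the PSP to raise an interrupt on completion.
 */
static inline u32 example_cmdresp_pack(unsigned int cmd)
{
	return (cmd << PSP_CMDRESP_CMD_SHIFT) | PSP_CMDRESP_IOC;
}

/* Response word: bit 31 flags a response and bits 15:0 carry the
 * firmware status (0 on success).
 */
static inline unsigned int example_cmdresp_error(u32 reg)
{
	return reg & PSP_CMDRESP_ERR_MASK;
}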
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index bef387c8abfd..eb0da6572720 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -198,6 +198,8 @@ int sp_init(struct sp_device *sp)
if (sp->dev_vdata->ccp_vdata)
ccp_dev_init(sp);
+ if (sp->dev_vdata->psp_vdata)
+ psp_dev_init(sp);
return 0;
}
@@ -206,6 +208,9 @@ void sp_destroy(struct sp_device *sp)
if (sp->dev_vdata->ccp_vdata)
ccp_dev_destroy(sp);
+ if (sp->dev_vdata->psp_vdata)
+ psp_dev_destroy(sp);
+
sp_del_device(sp);
}
@@ -237,6 +242,27 @@ int sp_resume(struct sp_device *sp)
}
#endif
+struct sp_device *sp_get_psp_master_device(void)
+{
+ struct sp_device *i, *ret = NULL;
+ unsigned long flags;
+
+ write_lock_irqsave(&sp_unit_lock, flags);
+ if (list_empty(&sp_units))
+ goto unlock;
+
+ list_for_each_entry(i, &sp_units, entry) {
+ if (i->psp_data && i->get_psp_master_device) {
+ ret = i->get_psp_master_device();
+ break;
+ }
+ }
+unlock:
+ write_unlock_irqrestore(&sp_unit_lock, flags);
+ return ret;
+}
+
static int __init sp_mod_init(void)
{
#ifdef CONFIG_X86
@@ -246,6 +272,10 @@ static int __init sp_mod_init(void)
if (ret)
return ret;
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ psp_pci_init();
+#endif
+
return 0;
#endif
@@ -265,6 +295,11 @@ static int __init sp_mod_init(void)
static void __exit sp_mod_exit(void)
{
#ifdef CONFIG_X86
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ psp_pci_exit();
+#endif
+
sp_pci_exit();
#endif
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 5ab486ade1ad..acb197b66ced 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -42,12 +42,17 @@ struct ccp_vdata {
const unsigned int offset;
const unsigned int rsamax;
};
+
+struct psp_vdata {
+ const unsigned int offset;
+};
+
/* Structure to hold SP device data */
struct sp_dev_vdata {
const unsigned int bar;
const struct ccp_vdata *ccp_vdata;
- void *psp_vdata;
+ const struct psp_vdata *psp_vdata;
};
struct sp_device {
@@ -68,6 +73,10 @@ struct sp_device {
/* DMA caching attribute support */
unsigned int axcache;
+ /* get and set master device */
+ struct sp_device *(*get_psp_master_device)(void);
+ void (*set_psp_master_device)(struct sp_device *);
+
bool irq_registered;
bool use_tasklet;
@@ -103,6 +112,7 @@ void sp_free_ccp_irq(struct sp_device *sp, void *data);
int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler,
const char *name, void *data);
void sp_free_psp_irq(struct sp_device *sp, void *data);
+struct sp_device *sp_get_psp_master_device(void);
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
@@ -130,4 +140,20 @@ static inline int ccp_dev_resume(struct sp_device *sp)
}
#endif /* CONFIG_CRYPTO_DEV_SP_CCP */
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+
+int psp_dev_init(struct sp_device *sp);
+void psp_pci_init(void);
+void psp_dev_destroy(struct sp_device *sp);
+void psp_pci_exit(void);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+static inline int psp_dev_init(struct sp_device *sp) { return 0; }
+static inline void psp_pci_init(void) { }
+static inline void psp_dev_destroy(struct sp_device *sp) { }
+static inline void psp_pci_exit(void) { }
+
+#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+
#endif
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 9859aa683a28..f5f43c50698a 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -25,6 +25,7 @@
#include <linux/ccp.h>
#include "ccp-dev.h"
+#include "psp-dev.h"
#define MSIX_VECTORS 2
@@ -32,6 +33,7 @@ struct sp_pci {
int msix_count;
struct msix_entry msix_entry[MSIX_VECTORS];
};
+static struct sp_device *sp_dev_master;
static int sp_get_msix_irqs(struct sp_device *sp)
{
@@ -108,6 +110,45 @@ static void sp_free_irqs(struct sp_device *sp)
sp->psp_irq = 0;
}
+static bool sp_pci_is_master(struct sp_device *sp)
+{
+ struct device *dev_cur, *dev_new;
+ struct pci_dev *pdev_cur, *pdev_new;
+
+ dev_new = sp->dev;
+ dev_cur = sp_dev_master->dev;
+
+ pdev_new = to_pci_dev(dev_new);
+ pdev_cur = to_pci_dev(dev_cur);
+
+ if (pdev_new->bus->number != pdev_cur->bus->number)
+ return pdev_new->bus->number < pdev_cur->bus->number;
+
+ if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn))
+ return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn);
+
+ return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn);
+}
+
+static void psp_set_master(struct sp_device *sp)
+{
+ if (!sp_dev_master) {
+ sp_dev_master = sp;
+ return;
+ }
+
+ if (sp_pci_is_master(sp))
+ sp_dev_master = sp;
+}
+
+static struct sp_device *psp_get_master(void)
+{
+ return sp_dev_master;
+}
+
static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct sp_device *sp;
@@ -166,6 +207,8 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto e_err;
pci_set_master(pdev);
+ sp->set_psp_master_device = psp_set_master;
+ sp->get_psp_master_device = psp_get_master;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (ret) {
@@ -225,6 +268,12 @@ static int sp_pci_resume(struct pci_dev *pdev)
}
#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+static const struct psp_vdata psp_entry = {
+ .offset = 0x10500,
+};
+#endif
+
static const struct sp_dev_vdata dev_vdata[] = {
{
.bar = 2,
@@ -237,6 +286,9 @@ static const struct sp_dev_vdata dev_vdata[] = {
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
.ccp_vdata = &ccpv5a,
#endif
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &psp_entry
+#endif
},
{
.bar = 2,
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 8b25d31e8401..c80ec1d03274 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -59,7 +59,10 @@ bool psci_tos_resident_on(int cpu)
return cpu == resident_cpu;
}
-struct psci_operations psci_ops;
+struct psci_operations psci_ops = {
+ .conduit = PSCI_CONDUIT_NONE,
+ .smccc_version = SMCCC_VERSION_1_0,
+};
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
@@ -210,6 +213,22 @@ static unsigned long psci_migrate_info_up_cpu(void)
0, 0, 0);
}
+static void set_conduit(enum psci_conduit conduit)
+{
+ switch (conduit) {
+ case PSCI_CONDUIT_HVC:
+ invoke_psci_fn = __invoke_psci_fn_hvc;
+ break;
+ case PSCI_CONDUIT_SMC:
+ invoke_psci_fn = __invoke_psci_fn_smc;
+ break;
+ default:
+ WARN(1, "Unexpected PSCI conduit %d\n", conduit);
+ }
+
+ psci_ops.conduit = conduit;
+}
+
static int get_set_conduit_method(struct device_node *np)
{
const char *method;
@@ -222,9 +241,9 @@ static int get_set_conduit_method(struct device_node *np)
}
if (!strcmp("hvc", method)) {
- invoke_psci_fn = __invoke_psci_fn_hvc;
+ set_conduit(PSCI_CONDUIT_HVC);
} else if (!strcmp("smc", method)) {
- invoke_psci_fn = __invoke_psci_fn_smc;
+ set_conduit(PSCI_CONDUIT_SMC);
} else {
pr_warn("invalid \"method\" property: %s\n", method);
return -EINVAL;
@@ -493,6 +512,31 @@ static void __init psci_init_migrate(void)
pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
}
+static void __init psci_init_smccc(void)
+{
+ u32 ver = ARM_SMCCC_VERSION_1_0;
+ int feature;
+
+ feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
+
+ if (feature != PSCI_RET_NOT_SUPPORTED) {
+ u32 ret;
+ ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
+ if (ret == ARM_SMCCC_VERSION_1_1) {
+ psci_ops.smccc_version = SMCCC_VERSION_1_1;
+ ver = ret;
+ }
+ }
+
+ /*
+ * Conveniently, the SMCCC and PSCI versions are encoded the
+ * same way. No, this isn't accidental.
+ */
+ pr_info("SMC Calling Convention v%d.%d\n",
+ PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
+
+}
+
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
@@ -541,6 +585,7 @@ static int __init psci_probe(void)
psci_init_migrate();
if (PSCI_VERSION_MAJOR(ver) >= 1) {
+ psci_init_smccc();
psci_init_cpu_suspend();
psci_init_system_suspend();
}
@@ -654,9 +699,9 @@ int __init psci_acpi_init(void)
pr_info("probing for conduit method from ACPI.\n");
if (acpi_psci_use_hvc())
- invoke_psci_fn = __invoke_psci_fn_hvc;
+ set_conduit(PSCI_CONDUIT_HVC);
else
- invoke_psci_fn = __invoke_psci_fn_smc;
+ set_conduit(PSCI_CONDUIT_SMC);
return psci_probe();
}
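
As the comment in psci_init_smccc() notes, the PSCI and SMCCC version words share one encoding, which is what lets the code reuse the PSCI macros on the SMCCC return value. A short illustration; the EXAMPLE_ macros are stand-ins for the kernel's PSCI_VERSION_MAJOR/MINOR:

/* Major in bits [31:16], minor in bits [15:0].
 * ARM_SMCCC_VERSION_1_1 == 0x10001, so major == 1 and minor == 1.
 */
#define EXAMPLE_VERSION_MAJOR(v)	(((v) & 0xffff0000) >> 16)
#define EXAMPLE_VERSION_MINOR(v)	((v) & 0xffff)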
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index deb483064f53..a41b572eeeb1 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -694,10 +694,8 @@ static int fw_cfg_cmdline_set(const char *arg, const struct kernel_param *kp)
*/
fw_cfg_cmdline_dev = platform_device_register_simple("fw_cfg",
PLATFORM_DEVID_NONE, res, processed);
- if (IS_ERR(fw_cfg_cmdline_dev))
- return PTR_ERR(fw_cfg_cmdline_dev);
- return 0;
+ return PTR_ERR_OR_ZERO(fw_cfg_cmdline_dev);
}
static int fw_cfg_cmdline_get(char *buf, const struct kernel_param *kp)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index bb40d2529a30..239bf2a4b3c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -179,8 +179,12 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
amdgpu_gfx_bit_to_queue(adev, queue_bit, &mec, &pipe, &queue);
- /* Using pipes 2/3 from MEC 2 seems cause problems */
- if (mec == 1 && pipe > 1)
+ /*
+ * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
+ * 2. Queue id 0 must be used, because CGPG_IDLE/SAVE/LOAD/RUN
+ * can only be issued on queue 0.
+ */
+ if ((mec == 1 && pipe > 1) || queue != 0)
continue;
ring->me = mec + 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6fc16eecf2dc..5afbc5e714d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2262,12 +2262,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT(adev) * 8);
+ uint64_t init_pde_value = 0, flags;
unsigned ring_instance;
struct amdgpu_ring *ring;
struct drm_sched_rq *rq;
+ unsigned long size;
int r, i;
- u64 flags;
- uint64_t init_pde_value = 0;
vm->va = RB_ROOT_CACHED;
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
@@ -2318,29 +2318,21 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_SHADOW);
- r = amdgpu_bo_create(adev,
- amdgpu_vm_bo_size(adev, adev->vm_manager.root_level),
- align, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- flags,
- NULL, NULL, init_pde_value, &vm->root.base.bo);
+ size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
+ r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM,
+ flags, NULL, NULL, init_pde_value,
+ &vm->root.base.bo);
if (r)
goto error_free_sched_entity;
+ r = amdgpu_bo_reserve(vm->root.base.bo, true);
+ if (r)
+ goto error_free_root;
+
vm->root.base.vm = vm;
list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
- INIT_LIST_HEAD(&vm->root.base.vm_status);
-
- if (vm->use_cpu_for_update) {
- r = amdgpu_bo_reserve(vm->root.base.bo, false);
- if (r)
- goto error_free_root;
-
- r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
- amdgpu_bo_unreserve(vm->root.base.bo);
- if (r)
- goto error_free_root;
- }
+ list_add_tail(&vm->root.base.vm_status, &vm->evicted);
+ amdgpu_bo_unreserve(vm->root.base.bo);
if (pasid) {
unsigned long flags;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index b69ceafb7888..ee14d78be2a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -278,9 +278,9 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
/* Track retry faults in per-VM fault FIFO. */
spin_lock(&adev->vm_manager.pasid_lock);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- spin_unlock(&adev->vm_manager.pasid_lock);
- if (WARN_ON_ONCE(!vm)) {
+ if (!vm) {
/* VM not found, process it normally */
+ spin_unlock(&adev->vm_manager.pasid_lock);
amdgpu_ih_clear_fault(adev, key);
return true;
}
@@ -288,9 +288,11 @@ static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
r = kfifo_put(&vm->faults, key);
if (!r) {
/* FIFO is full. Ignore it until there is space */
+ spin_unlock(&adev->vm_manager.pasid_lock);
amdgpu_ih_clear_fault(adev, key);
goto ignore_iv;
}
+ spin_unlock(&adev->vm_manager.pasid_lock);
/* It's the first fault for this address, process it normally */
return true;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index da2b99c2d95f..1e3e05a11f7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1049,7 +1049,6 @@ static int vi_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_CGTS |
AMD_CG_SUPPORT_GFX_CGTS_LS |
- AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_HDP_MGCG |
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 97bfc00d2a82..c62346fdc05d 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -119,16 +119,6 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
return 0;
- if (map) {
- vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
- MEMREMAP_WC);
- if (!vgpu->gm.aperture_va)
- return -ENOMEM;
- } else {
- memunmap(vgpu->gm.aperture_va);
- vgpu->gm.aperture_va = NULL;
- }
-
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
@@ -141,11 +131,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
aperture_pa >> PAGE_SHIFT,
aperture_sz >> PAGE_SHIFT,
map);
- if (ret) {
- memunmap(vgpu->gm.aperture_va);
- vgpu->gm.aperture_va = NULL;
+ if (ret)
return ret;
- }
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 2ab584f97dfb..2fb7b34ef561 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -472,7 +472,6 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
ret = PTR_ERR(dmabuf);
goto out_free_gem;
}
- obj->base.dma_buf = dmabuf;
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 769c1c24ae75..70494e394d2c 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -521,24 +521,23 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
_EL_OFFSET_STATUS_PTR);
-
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
ctx_status_ptr.read_ptr = 0;
ctx_status_ptr.write_ptr = 0x7;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
-static void clean_execlist(struct intel_vgpu *vgpu)
+static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
{
- enum intel_engine_id i;
+ unsigned int tmp;
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
+ struct intel_vgpu_submission *s = &vgpu->submission;
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- struct intel_vgpu_submission *s = &vgpu->submission;
-
- kfree(s->ring_scan_buffer[i]);
- s->ring_scan_buffer[i] = NULL;
- s->ring_scan_buffer_size[i] = 0;
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ kfree(s->ring_scan_buffer[engine->id]);
+ s->ring_scan_buffer[engine->id] = NULL;
+ s->ring_scan_buffer_size[engine->id] = 0;
}
}
@@ -553,9 +552,10 @@ static void reset_execlist(struct intel_vgpu *vgpu,
init_vgpu_execlist(vgpu, engine->id);
}
-static int init_execlist(struct intel_vgpu *vgpu)
+static int init_execlist(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
{
- reset_execlist(vgpu, ALL_ENGINES);
+ reset_execlist(vgpu, engine_mask);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index a529d2bd393c..8d5317d0122d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -997,9 +997,11 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
struct intel_vgpu *vgpu = spt->vgpu;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
struct intel_gvt_gtt_entry se, ge;
- unsigned long i;
+ unsigned long gfn, i;
int ret;
trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1007,9 +1009,10 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
for_each_present_guest_entry(spt, &ge, i) {
- ret = gtt_entry_p2m(vgpu, &ge, &se);
- if (ret)
- goto fail;
+ gfn = ops->get_pfn(&ge);
+ if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn) ||
+ gtt_entry_p2m(vgpu, &ge, &se))
+ ops->set_pfn(&se, gvt->gtt.scratch_mfn);
ppgtt_set_shadow_entry(spt, &se, i);
}
return 0;
@@ -1906,7 +1909,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
- unsigned long gma;
+ unsigned long gma, gfn;
struct intel_gvt_gtt_entry e, m;
int ret;
@@ -1925,6 +1928,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
bytes);
if (ops->test_present(&e)) {
+ gfn = ops->get_pfn(&e);
+
+ /*
+ * One PTE update may be issued as multiple partial writes, and
+ * the first write may not yet form a valid gfn.
+ */
+ if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+ goto out;
+ }
+
ret = gtt_entry_p2m(vgpu, &e, &m);
if (ret) {
gvt_vgpu_err("fail to translate guest gtt entry\n");
@@ -1939,6 +1952,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
}
+out:
ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
gtt_invalidate(gvt->dev_priv);
ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
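
A hedged walkthrough of the partial-write case the new check handles, assuming an 8-byte GTT entry updated by two 4-byte MMIO stores (the values are illustrative):

/*
 * entry (old)           = 0x00000000_abcd1003
 * write #1 (low dword)  = 0x00000000_12340003   -> gfn half-updated
 * write #2 (high dword) = 0x00000001_12340003   -> gfn complete
 *
 * After write #1 the gfn may not correspond to any guest page, so the
 * shadow entry is pointed at the scratch page instead of failing the
 * translation; write #2 takes the same path again and installs the
 * real mapping.
 */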
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 7dc7a80213a8..c6197d990818 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -82,7 +82,6 @@ struct intel_gvt_device_info {
struct intel_vgpu_gm {
u64 aperture_sz;
u64 hidden_sz;
- void *aperture_va;
struct drm_mm_node low_gm_node;
struct drm_mm_node high_gm_node;
};
@@ -127,7 +126,6 @@ struct intel_vgpu_irq {
struct intel_vgpu_opregion {
bool mapped;
void *va;
- void *va_gopregion;
u32 gfn[INTEL_GVT_OPREGION_PAGES];
};
@@ -152,8 +150,8 @@ enum {
struct intel_vgpu_submission_ops {
const char *name;
- int (*init)(struct intel_vgpu *vgpu);
- void (*clean)(struct intel_vgpu *vgpu);
+ int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+ void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
};
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 92d6468daeee..9be639aa3b55 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1494,7 +1494,6 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct intel_vgpu_submission *s = &vgpu->submission;
u32 data = *(u32 *)p_data;
int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
bool enable_execlist;
@@ -1523,11 +1522,9 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
if (!enable_execlist)
return 0;
- if (s->active)
- return 0;
-
ret = intel_vgpu_select_submission_ops(vgpu,
- INTEL_VGPU_EXECLIST_SUBMISSION);
+ ENGINE_MASK(ring_id),
+ INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
@@ -2843,6 +2840,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index a1bd82feb827..f8e77e166246 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -58,6 +58,7 @@ struct intel_gvt_mpt {
int (*set_opregion)(void *vgpu);
int (*get_vfio_device)(void *vgpu);
void (*put_vfio_device)(void *vgpu);
+ bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
};
extern struct intel_gvt_mpt xengt_mpt;
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 554d1db1f3c8..909499b73d03 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -651,6 +651,39 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
return ret;
}
+static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
+{
+ return off >= vgpu_aperture_offset(vgpu) &&
+ off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
+}
+
+static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
+ void *buf, unsigned long count, bool is_write)
+{
+ void *aperture_va;
+
+ if (!intel_vgpu_in_aperture(vgpu, off) ||
+ !intel_vgpu_in_aperture(vgpu, off + count - 1)) {
+ gvt_vgpu_err("Invalid aperture offset %llu\n", off);
+ return -EINVAL;
+ }
+
+ aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+ ALIGN_DOWN(off, PAGE_SIZE),
+ count + offset_in_page(off));
+ if (!aperture_va)
+ return -EIO;
+
+ if (is_write)
+ memcpy(aperture_va + offset_in_page(off), buf, count);
+ else
+ memcpy(buf, aperture_va + offset_in_page(off), count);
+
+ io_mapping_unmap(aperture_va);
+
+ return 0;
+}
+
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
@@ -679,8 +712,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
buf, count, is_write);
break;
case VFIO_PCI_BAR2_REGION_INDEX:
- ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
- buf, count, is_write);
+ ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
break;
case VFIO_PCI_BAR1_REGION_INDEX:
case VFIO_PCI_BAR3_REGION_INDEX:
@@ -1575,6 +1607,21 @@ static unsigned long kvmgt_virt_to_pfn(void *addr)
return PFN_DOWN(__pa(addr));
}
+static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
+{
+ struct kvmgt_guest_info *info;
+ struct kvm *kvm;
+
+ if (!handle_valid(handle))
+ return false;
+
+ info = (struct kvmgt_guest_info *)handle;
+ kvm = info->kvm;
+
+ return kvm_is_visible_gfn(kvm, gfn);
+
+}
+
struct intel_gvt_mpt kvmgt_mpt = {
.host_init = kvmgt_host_init,
.host_exit = kvmgt_host_exit,
@@ -1590,6 +1637,7 @@ struct intel_gvt_mpt kvmgt_mpt = {
.set_opregion = kvmgt_set_opregion,
.get_vfio_device = kvmgt_get_vfio_device,
.put_vfio_device = kvmgt_put_vfio_device,
+ .is_valid_gfn = kvmgt_is_valid_gfn,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 562b5ad857a4..5c869e3fdf3b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -56,38 +56,6 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
-static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
-{
- u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
- u64 aperture_sz = vgpu_aperture_sz(vgpu);
-
- return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
-}
-
-static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
- void *pdata, unsigned int size, bool is_read)
-{
- u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
- u64 offset = gpa - aperture_gpa;
-
- if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
- gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
- offset, size);
- return -EINVAL;
- }
-
- if (!vgpu->gm.aperture_va) {
- gvt_vgpu_err("BAR is not enabled\n");
- return -ENXIO;
- }
-
- if (is_read)
- memcpy(pdata, vgpu->gm.aperture_va + offset, size);
- else
- memcpy(vgpu->gm.aperture_va + offset, pdata, size);
- return 0;
-}
-
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
void *p_data, unsigned int bytes, bool read)
{
@@ -144,11 +112,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
}
mutex_lock(&gvt->lock);
- if (vgpu_gpa_is_aperture(vgpu, pa)) {
- ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
- goto out;
- }
-
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (WARN_ON(bytes > 8))
@@ -222,11 +185,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
mutex_lock(&gvt->lock);
- if (vgpu_gpa_is_aperture(vgpu, pa)) {
- ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
- goto out;
- }
-
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
if (WARN_ON(bytes > 8))
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 74834395dd89..73ad6e90e49d 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -80,7 +80,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
- { /* Terminated */ }
+ {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
@@ -146,7 +146,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
{RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
- { /* Terminated */ }
+ {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct {
@@ -167,7 +167,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
};
int ring_id, i;
- for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+ for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
gen9_render_mocs.control_table[ring_id][i] =
@@ -310,8 +310,8 @@ static void switch_mmio(struct intel_vgpu *pre,
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
switch_mocs(pre, next, ring_id);
- mmio = dev_priv->gvt->engine_mmio_list;
- while (i915_mmio_reg_offset((mmio++)->reg)) {
+ for (mmio = dev_priv->gvt->engine_mmio_list;
+ i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->ring_id != ring_id)
continue;
// save
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index ca8005a6d5fa..81aff4eacbfe 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -339,4 +339,21 @@ static inline void intel_gvt_hypervisor_put_vfio_device(struct intel_vgpu *vgpu)
intel_gvt_host.mpt->put_vfio_device(vgpu);
}
+/**
+ * intel_gvt_hypervisor_is_valid_gfn - check if a gfn is visible to the guest
+ * @vgpu: a vGPU
+ * @gfn: guest PFN
+ *
+ * Returns:
+ * true if the gfn is valid and visible, false otherwise.
+ */
+static inline bool intel_gvt_hypervisor_is_valid_gfn(
+ struct intel_vgpu *vgpu, unsigned long gfn)
+{
+ if (!intel_gvt_host.mpt->is_valid_gfn)
+ return true;
+
+ return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
+}
+
#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 8420d1fc3ddb..fa75a2eead90 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -299,21 +299,13 @@ int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa)
{
int i, ret = 0;
- unsigned long pfn;
gvt_dbg_core("emulate opregion from kernel\n");
switch (intel_gvt_host.hypervisor_type) {
case INTEL_GVT_HYPERVISOR_KVM:
- pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gpa >> PAGE_SHIFT);
- vgpu_opregion(vgpu)->va_gopregion = memremap(pfn << PAGE_SHIFT,
- INTEL_GVT_OPREGION_SIZE,
- MEMREMAP_WB);
- if (!vgpu_opregion(vgpu)->va_gopregion) {
- gvt_vgpu_err("failed to map guest opregion\n");
- ret = -EFAULT;
- }
- vgpu_opregion(vgpu)->mapped = true;
+ for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
+ vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
break;
case INTEL_GVT_HYPERVISOR_XEN:
/**
@@ -352,10 +344,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
if (vgpu_opregion(vgpu)->mapped)
map_vgpu_opregion(vgpu, false);
} else if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
- if (vgpu_opregion(vgpu)->mapped) {
- memunmap(vgpu_opregion(vgpu)->va_gopregion);
- vgpu_opregion(vgpu)->va_gopregion = NULL;
- }
+ /* Guest opregion is released by VFIO */
}
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
get_order(INTEL_GVT_OPREGION_SIZE));
@@ -480,19 +469,40 @@ static bool querying_capabilities(u32 scic)
*/
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
{
- u32 *scic, *parm;
+ u32 scic, parm;
u32 func, subfunc;
+ u64 scic_pa = 0, parm_pa = 0;
+ int ret;
switch (intel_gvt_host.hypervisor_type) {
case INTEL_GVT_HYPERVISOR_XEN:
- scic = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_SCIC;
- parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
+ scic = *((u32 *)vgpu_opregion(vgpu)->va +
+ INTEL_GVT_OPREGION_SCIC);
+ parm = *((u32 *)vgpu_opregion(vgpu)->va +
+ INTEL_GVT_OPREGION_PARM);
break;
case INTEL_GVT_HYPERVISOR_KVM:
- scic = vgpu_opregion(vgpu)->va_gopregion +
- INTEL_GVT_OPREGION_SCIC;
- parm = vgpu_opregion(vgpu)->va_gopregion +
- INTEL_GVT_OPREGION_PARM;
+ scic_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+ INTEL_GVT_OPREGION_SCIC;
+ parm_pa = (vgpu_opregion(vgpu)->gfn[0] << PAGE_SHIFT) +
+ INTEL_GVT_OPREGION_PARM;
+
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, scic_pa,
+ &scic, sizeof(scic));
+ if (ret) {
+ gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+ ret, scic_pa, sizeof(scic));
+ return ret;
+ }
+
+ ret = intel_gvt_hypervisor_read_gpa(vgpu, parm_pa,
+ &parm, sizeof(parm));
+ if (ret) {
+ gvt_vgpu_err("guest opregion read error %d, gpa 0x%llx, len %lu\n",
+ ret, parm_pa, sizeof(parm));
+ return ret;
+ }
+
break;
default:
gvt_vgpu_err("not supported hypervisor\n");
@@ -510,9 +520,9 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
return 0;
}
- func = GVT_OPREGION_FUNC(*scic);
- subfunc = GVT_OPREGION_SUBFUNC(*scic);
- if (!querying_capabilities(*scic)) {
+ func = GVT_OPREGION_FUNC(scic);
+ subfunc = GVT_OPREGION_SUBFUNC(scic);
+ if (!querying_capabilities(scic)) {
gvt_vgpu_err("requesting runtime service: func \"%s\","
" subfunc \"%s\"\n",
opregion_func_name(func),
@@ -521,11 +531,43 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci)
* emulate exit status of function call, '0' means
* "failure, generic, unsupported or unknown cause"
*/
- *scic &= ~OPREGION_SCIC_EXIT_MASK;
- return 0;
+ scic &= ~OPREGION_SCIC_EXIT_MASK;
+ goto out;
+ }
+
+ scic = 0;
+ parm = 0;
+
+out:
+ switch (intel_gvt_host.hypervisor_type) {
+ case INTEL_GVT_HYPERVISOR_XEN:
+ *((u32 *)vgpu_opregion(vgpu)->va +
+ INTEL_GVT_OPREGION_SCIC) = scic;
+ *((u32 *)vgpu_opregion(vgpu)->va +
+ INTEL_GVT_OPREGION_PARM) = parm;
+ break;
+ case INTEL_GVT_HYPERVISOR_KVM:
+ ret = intel_gvt_hypervisor_write_gpa(vgpu, scic_pa,
+ &scic, sizeof(scic));
+ if (ret) {
+ gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+ ret, scic_pa, sizeof(scic));
+ return ret;
+ }
+
+ ret = intel_gvt_hypervisor_write_gpa(vgpu, parm_pa,
+ &parm, sizeof(parm));
+ if (ret) {
+ gvt_vgpu_err("guest opregion write error %d, gpa 0x%llx, len %lu\n",
+ ret, parm_pa, sizeof(parm));
+ return ret;
+ }
+
+ break;
+ default:
+ gvt_vgpu_err("not supported hypervisor\n");
+ return -EINVAL;
}
- *scic = 0;
- *parm = 0;
return 0;
}
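For reference, the KVM path above no longer keeps the guest opregion mapped; it round-trips SCIC/PARM through the read/write-GPA helpers. A minimal userspace sketch of that read-modify-write pattern, with guest memory modeled as a flat byte array and the offsets and exit mask reduced to illustrative stand-ins (not the real INTEL_GVT_OPREGION_* values):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy "guest physical memory"; offsets and mask are stand-ins. */
static uint8_t guest_mem[4096];
#define OPREGION_SCIC  0x200   /* byte offset, illustrative */
#define OPREGION_PARM  0x204
#define SCIC_EXIT_MASK 0xe0u

static int read_gpa(uint64_t gpa, void *buf, size_t len)
{
	if (gpa + len > sizeof(guest_mem))
		return -1;
	memcpy(buf, guest_mem + gpa, len);
	return 0;
}

static int write_gpa(uint64_t gpa, const void *buf, size_t len)
{
	if (gpa + len > sizeof(guest_mem))
		return -1;
	memcpy(guest_mem + gpa, buf, len);
	return 0;
}

int main(void)
{
	uint32_t scic, parm;

	/* Read both words by guest physical address, not via a mapping. */
	if (read_gpa(OPREGION_SCIC, &scic, sizeof(scic)) ||
	    read_gpa(OPREGION_PARM, &parm, sizeof(parm)))
		return 1;

	/* Emulate a failed call: clear the exit-status field, zero PARM. */
	scic &= ~SCIC_EXIT_MASK;
	parm = 0;

	/* Write the results back the same way. */
	if (write_gpa(OPREGION_SCIC, &scic, sizeof(scic)) ||
	    write_gpa(OPREGION_PARM, &parm, sizeof(parm)))
		return 1;

	printf("scic=0x%08x parm=0x%08x\n", scic, parm);
	return 0;
}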
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index eea1a2f92099..cc1ce361cd76 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -50,6 +50,7 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
struct vgpu_sched_data {
struct list_head lru_list;
struct intel_vgpu *vgpu;
+ bool active;
ktime_t sched_in_time;
ktime_t sched_out_time;
@@ -308,8 +309,15 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+
kfree(vgpu->sched_data);
vgpu->sched_data = NULL;
+
+ /* if this was the last vGPU, stop the scheduler timer */
+ if (idr_is_empty(&gvt->vgpu_idr))
+ hrtimer_cancel(&sched_data->timer);
}
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -325,6 +333,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
if (!hrtimer_active(&sched_data->timer))
hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
sched_data->period), HRTIMER_MODE_ABS);
+ vgpu_data->active = true;
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
@@ -332,6 +341,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
list_del_init(&vgpu_data->lru_list);
+ vgpu_data->active = false;
}
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
@@ -367,9 +377,12 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
- gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+ struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
- vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+ if (!vgpu_data->active) {
+ gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
+ vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
+ }
}
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
@@ -382,6 +395,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
int ring_id;
+ struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+
+ if (!vgpu_data->active)
+ return;
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 0056638b0c16..b55b3580ca1d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -991,7 +991,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- intel_vgpu_select_submission_ops(vgpu, 0);
+ intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
i915_gem_context_put(s->shadow_ctx);
kmem_cache_destroy(s->workloads);
}
@@ -1079,6 +1079,7 @@ out_shadow_ctx:
*
*/
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+ unsigned long engine_mask,
unsigned int interface)
{
struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1091,21 +1092,21 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
if (WARN_ON(interface >= ARRAY_SIZE(ops)))
return -EINVAL;
- if (s->active) {
- s->ops->clean(vgpu);
- s->active = false;
- gvt_dbg_core("vgpu%d: de-select ops [ %s ] \n",
- vgpu->id, s->ops->name);
- }
+ if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+ return -EINVAL;
+
+ if (s->active)
+ s->ops->clean(vgpu, engine_mask);
if (interface == 0) {
s->ops = NULL;
s->virtual_submission_interface = 0;
- gvt_dbg_core("vgpu%d: no submission ops\n", vgpu->id);
+ s->active = false;
+ gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
return 0;
}
- ret = ops[interface]->init(vgpu);
+ ret = ops[interface]->init(vgpu, engine_mask);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 3de77dfa7c59..ff175a98b19e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -141,6 +141,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
+ unsigned long engine_mask,
unsigned int interface);
extern const struct intel_vgpu_submission_ops
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 4688619f6a1c..b87b19d8443c 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -258,6 +258,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_gvt_debugfs_remove_vgpu(vgpu);
idr_remove(&gvt->vgpu_idr, vgpu->id);
+ if (idr_is_empty(&gvt->vgpu_idr))
+ intel_gvt_clean_irq(gvt);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_submission(vgpu);
intel_vgpu_clean_display(vgpu);
@@ -518,8 +520,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
intel_vgpu_reset_submission(vgpu, resetting_eng);
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
- intel_vgpu_select_submission_ops(vgpu, 0);
-
+ intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
/*fence will not be reset during virtual reset */
if (dmlr) {
intel_vgpu_reset_gtt(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index ccb5ba043b63..95478db9998b 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1032,7 +1032,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
const struct drm_i915_reg_table *table = engine->reg_tables;
int count = engine->reg_table_count;
- do {
+ for (; count > 0; ++table, --count) {
if (!table->master || is_master) {
const struct drm_i915_reg_descriptor *reg;
@@ -1040,7 +1040,7 @@ find_reg(const struct intel_engine_cs *engine, bool is_master, u32 addr)
if (reg != NULL)
return reg;
}
- } while (table++, --count);
+ }
return NULL;
}
@@ -1212,6 +1212,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
continue;
}
+ if (desc->bits[i].offset >= length) {
+ DRM_DEBUG_DRIVER("CMD: Rejected command 0x%08X, too short to check bitmask (%s)\n",
+ *cmd, engine->name);
+ return false;
+ }
+
dword = cmd[desc->bits[i].offset] &
desc->bits[i].mask;
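The check_cmd() hunk above rejects a command whose descriptor points at a dword beyond the command's actual length, instead of reading past the end of the buffer. A self-contained sketch of that guard, with the descriptor layout simplified to a single offset/mask pair:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bits_desc {
	uint32_t offset;   /* dword index into the command */
	uint32_t mask;
};

static bool check_cmd(const uint32_t *cmd, uint32_t length,
		      const struct bits_desc *desc)
{
	/* Reject before dereferencing: the descriptor may point past
	 * the end of a short command. */
	if (desc->offset >= length) {
		fprintf(stderr,
			"rejected 0x%08x: too short to check bitmask\n",
			cmd[0]);
		return false;
	}
	return (cmd[desc->offset] & desc->mask) == 0;
}

int main(void)
{
	uint32_t cmd[2] = { 0x18800001, 0x0000000f };
	struct bits_desc d = { .offset = 3, .mask = 0xff };

	/* Command is 2 dwords; descriptor wants dword 3 -> rejected. */
	printf("%s\n", check_cmd(cmd, 2, &d) ? "ok" : "rejected");
	return 0;
}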
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6c8da9d20c33..173d0095e3b2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1842,6 +1842,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
if (IS_GEN9_LP(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
+ else
+ intel_display_set_init_power(dev_priv, true);
i915_gem_sanitize(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index caebd5825279..a42deebedb0f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3717,7 +3717,11 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_display_error_state *error);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
+ u32 val, int timeout_us);
+#define sandybridge_pcode_write(dev_priv, mbox, val) \
+ sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
+
int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_base_ms);
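The header change keeps every existing sandybridge_pcode_write() call site source-compatible by turning the old name into a macro around the new *_timeout() variant with the historic 500us default. A tiny sketch of that wrapper-with-default pattern (the function body is a stub):

#include <stdio.h>

/* The _timeout variant carries the extra parameter... */
static int pcode_write_timeout(unsigned mbox, unsigned val, int timeout_us)
{
	printf("mbox=%u val=%u timeout=%dus\n", mbox, val, timeout_us);
	return 0;
}

/* ...and the old name becomes a macro supplying the historic default,
 * so existing callers compile unchanged. */
#define pcode_write(mbox, val) pcode_write_timeout(mbox, val, 500)

int main(void)
{
	pcode_write(1, 42);                /* old API, default timeout */
	pcode_write_timeout(1, 42, 2000);  /* caller-chosen timeout */
	return 0;
}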
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8bc3283484be..dd89abd2263d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3323,16 +3323,15 @@ i915_gem_retire_work_handler(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
- /* Keep the retire handler running until we are finally idle.
+ /*
+ * Keep the retire handler running until we are finally idle.
* We do not need to do this test under locking as in the worst-case
* we queue the retire worker once too often.
*/
- if (READ_ONCE(dev_priv->gt.awake)) {
- i915_queue_hangcheck(dev_priv);
+ if (READ_ONCE(dev_priv->gt.awake))
queue_delayed_work(dev_priv->wq,
&dev_priv->gt.retire_work,
round_jiffies_up_relative(HZ));
- }
}
static inline bool
@@ -5283,6 +5282,8 @@ err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_uc_fini_wq(dev_priv);
+
if (ret != -EIO)
i915_gem_cleanup_userptr(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c5f393870532..7e403eaa9e0f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -377,6 +377,7 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
struct pagevec *pvec = &vm->free_pages;
+ struct pagevec stash;
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
@@ -395,7 +396,15 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
if (likely(pvec->nr))
return pvec->pages[--pvec->nr];
- /* Otherwise batch allocate pages to amoritize cost of set_pages_wc. */
+ /*
+ * Otherwise batch allocate pages to amortize the cost of set_pages_wc.
+ *
+ * We have to be careful as page allocation may trigger the shrinker
+ * (via direct reclaim) which will fill up the WC stash underneath us.
+ * So we add our WB pages into a temporary pvec on the stack and merge
+ * them into the WC stash after all the allocations are complete.
+ */
+ pagevec_init(&stash);
do {
struct page *page;
@@ -403,15 +412,24 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page))
break;
- pvec->pages[pvec->nr++] = page;
- } while (pagevec_space(pvec));
+ stash.pages[stash.nr++] = page;
+ } while (stash.nr < pagevec_space(pvec));
- if (unlikely(!pvec->nr))
- return NULL;
+ if (stash.nr) {
+ int nr = min_t(int, stash.nr, pagevec_space(pvec));
+ struct page **pages = stash.pages + stash.nr - nr;
- set_pages_array_wc(pvec->pages, pvec->nr);
+ if (nr && !set_pages_array_wc(pages, nr)) {
+ memcpy(pvec->pages + pvec->nr,
+ pages, sizeof(pages[0]) * nr);
+ pvec->nr += nr;
+ stash.nr -= nr;
+ }
- return pvec->pages[--pvec->nr];
+ pagevec_release(&stash);
+ }
+
+ return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
}
static void vm_free_pages_release(struct i915_address_space *vm,
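The vm_alloc_page() rework above allocates into a stack-local stash first, because the allocation itself can recurse into the shrinker and refill the per-VM stash underneath us; only once all allocations are complete is the batch merged in. A userspace sketch of that stash-then-merge pattern, with fixed-size arrays standing in for the pagevecs:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CACHE_CAP 15

struct vec {
	int nr;
	void *pages[CACHE_CAP];
};

static int space(const struct vec *v) { return CACHE_CAP - v->nr; }

int main(void)
{
	struct vec cache = {0};   /* shared stash others may refill */
	struct vec stash = {0};   /* private, on "our" stack */
	int i, nr;

	/* Allocate into the private stash; if an allocation triggered
	 * reclaim, 'cache' could grow concurrently without upsetting us. */
	while (stash.nr < space(&cache)) {
		void *page = malloc(64);
		if (!page)
			break;
		stash.pages[stash.nr++] = page;
	}

	/* Merge as much as still fits; release the leftovers. The
	 * merged pages stay cached, as they would in the driver. */
	nr = stash.nr < space(&cache) ? stash.nr : space(&cache);
	memcpy(cache.pages + cache.nr, stash.pages + stash.nr - nr,
	       sizeof(void *) * nr);
	cache.nr += nr;
	for (i = 0; i < stash.nr - nr; i++)
		free(stash.pages[i]);

	printf("cached %d pages\n", cache.nr);
	return 0;
}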
@@ -1341,15 +1359,18 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
int count = gen8_pte_count(start, length);
if (pt == vm->scratch_pt) {
+ pd->used_pdes++;
+
pt = alloc_pt(vm);
- if (IS_ERR(pt))
+ if (IS_ERR(pt)) {
+ pd->used_pdes--;
goto unwind;
+ }
if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
gen8_initialize_pt(vm, pt);
gen8_ppgtt_set_pde(vm, pd, pt, pde);
- pd->used_pdes++;
GEM_BUG_ON(pd->used_pdes > I915_PDES);
}
@@ -1373,13 +1394,16 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
if (pd == vm->scratch_pd) {
+ pdp->used_pdpes++;
+
pd = alloc_pd(vm);
- if (IS_ERR(pd))
+ if (IS_ERR(pd)) {
+ pdp->used_pdpes--;
goto unwind;
+ }
gen8_initialize_pd(vm, pd);
gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
- pdp->used_pdpes++;
GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
@@ -2287,12 +2311,23 @@ static void gen8_check_and_clear_faults(struct drm_i915_private *dev_priv)
u32 fault = I915_READ(GEN8_RING_FAULT_REG);
if (fault & RING_FAULT_VALID) {
+ u32 fault_data0, fault_data1;
+ u64 fault_addr;
+
+ fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+ fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+ fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
+ ((u64)fault_data0 << 12);
+
DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
"\tEngine ID: %d\n"
"\tSource ID: %d\n"
"\tType: %d\n",
- fault & PAGE_MASK,
+ upper_32_bits(fault_addr),
+ lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
GEN8_RING_FAULT_ENGINE_ID(fault),
RING_FAULT_SRCID(fault),
RING_FAULT_FAULT_TYPE(fault));
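The fault-decode hunk reconstructs a 48-bit virtual address from the two TLB data registers: DATA0 supplies VA bits 43:12 and the low nibble of DATA1 supplies bits 47:44, with bit 4 of DATA1 selecting GGTT vs PPGTT. A sketch of that bit assembly with made-up register values:

#include <stdint.h>
#include <stdio.h>

#define FAULT_VA_HIGH_BITS 0xfu     /* low nibble of DATA1 */
#define FAULT_GTT_SEL      (1u << 4)

int main(void)
{
	uint32_t data0 = 0x00abcdef;    /* VA bits 43:12, example */
	uint32_t data1 = 0x00000015;    /* VA bits 47:44 + GGTT select */
	uint64_t va;

	va = ((uint64_t)(data1 & FAULT_VA_HIGH_BITS) << 44) |
	     ((uint64_t)data0 << 12);

	printf("Addr: 0x%08x_%08x (%s)\n",
	       (uint32_t)(va >> 32), (uint32_t)va,
	       (data1 & FAULT_GTT_SEL) ? "GGTT" : "PPGTT");
	return 0;
}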
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index d575109f7a7f..e09d18df8b7f 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -276,6 +276,8 @@ static void mark_busy(struct drm_i915_private *i915)
intel_engines_unpark(i915);
+ i915_queue_hangcheck(i915);
+
queue_delayed_work(i915->wq,
&i915->gt.retire_work,
round_jiffies_up_relative(HZ));
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 9029ed04879c..0e158f9287c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -363,13 +363,13 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_PURGEABLE);
- if (freed < sc->nr_to_scan)
+ if (sc->nr_scanned < sc->nr_to_scan)
freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
&sc->nr_scanned,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
- if (freed < sc->nr_to_scan && current_is_kswapd()) {
+ if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
intel_runtime_pm_get(i915);
freed += i915_gem_shrink(i915,
sc->nr_to_scan - sc->nr_scanned,
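The shrinker fix keys the escalation on sc->nr_scanned (objects actually examined) rather than on pages freed, so a pass that already scanned its full budget no longer escalates needlessly. A short sketch of that budget accounting, with the shrink passes simulated:

#include <stdio.h>

struct shrink_control {
	unsigned long nr_to_scan;
	unsigned long nr_scanned;
};

/* Simulated pass: examines 'scanned' objects, frees 'freed' pages. */
static unsigned long shrink(struct shrink_control *sc,
			    unsigned long scanned, unsigned long freed)
{
	sc->nr_scanned += scanned;
	return freed;
}

int main(void)
{
	struct shrink_control sc = { .nr_to_scan = 128, .nr_scanned = 0 };
	unsigned long freed;

	freed = shrink(&sc, 128, 0);   /* scanned everything, freed 0 */

	/* Escalate only if we ran out of objects to scan, not because
	 * the scanned objects happened to be unfreeable. */
	if (sc.nr_scanned < sc.nr_to_scan)
		freed += shrink(&sc, sc.nr_to_scan - sc.nr_scanned, 8);

	printf("scanned %lu/%lu, freed %lu\n",
	       sc.nr_scanned, sc.nr_to_scan, freed);
	return 0;
}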
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 36d48422b475..1c30c688f23a 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -74,19 +74,19 @@
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_i830_info __initconst = {
+static const struct intel_device_info intel_i830_info = {
GEN2_FEATURES,
.platform = INTEL_I830,
.is_mobile = 1, .cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
-static const struct intel_device_info intel_i845g_info __initconst = {
+static const struct intel_device_info intel_i845g_info = {
GEN2_FEATURES,
.platform = INTEL_I845G,
};
-static const struct intel_device_info intel_i85x_info __initconst = {
+static const struct intel_device_info intel_i85x_info = {
GEN2_FEATURES,
.platform = INTEL_I85X, .is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
@@ -94,7 +94,7 @@ static const struct intel_device_info intel_i85x_info __initconst = {
.has_fbc = 1,
};
-static const struct intel_device_info intel_i865g_info __initconst = {
+static const struct intel_device_info intel_i865g_info = {
GEN2_FEATURES,
.platform = INTEL_I865G,
};
@@ -108,7 +108,7 @@ static const struct intel_device_info intel_i865g_info __initconst = {
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_i915g_info __initconst = {
+static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
.platform = INTEL_I915G, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
@@ -116,7 +116,7 @@ static const struct intel_device_info intel_i915g_info __initconst = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i915gm_info __initconst = {
+static const struct intel_device_info intel_i915gm_info = {
GEN3_FEATURES,
.platform = INTEL_I915GM,
.is_mobile = 1,
@@ -128,7 +128,7 @@ static const struct intel_device_info intel_i915gm_info __initconst = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945g_info __initconst = {
+static const struct intel_device_info intel_i945g_info = {
GEN3_FEATURES,
.platform = INTEL_I945G,
.has_hotplug = 1, .cursor_needs_physical = 1,
@@ -137,7 +137,7 @@ static const struct intel_device_info intel_i945g_info __initconst = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_i945gm_info __initconst = {
+static const struct intel_device_info intel_i945gm_info = {
GEN3_FEATURES,
.platform = INTEL_I945GM, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
@@ -148,14 +148,14 @@ static const struct intel_device_info intel_i945gm_info __initconst = {
.unfenced_needs_alignment = 1,
};
-static const struct intel_device_info intel_g33_info __initconst = {
+static const struct intel_device_info intel_g33_info = {
GEN3_FEATURES,
.platform = INTEL_G33,
.has_hotplug = 1,
.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_info __initconst = {
+static const struct intel_device_info intel_pineview_info = {
GEN3_FEATURES,
.platform = INTEL_PINEVIEW, .is_mobile = 1,
.has_hotplug = 1,
@@ -172,7 +172,7 @@ static const struct intel_device_info intel_pineview_info __initconst = {
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_i965g_info __initconst = {
+static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
.platform = INTEL_I965G,
.has_overlay = 1,
@@ -180,7 +180,7 @@ static const struct intel_device_info intel_i965g_info __initconst = {
.has_snoop = false,
};
-static const struct intel_device_info intel_i965gm_info __initconst = {
+static const struct intel_device_info intel_i965gm_info = {
GEN4_FEATURES,
.platform = INTEL_I965GM,
.is_mobile = 1, .has_fbc = 1,
@@ -190,13 +190,13 @@ static const struct intel_device_info intel_i965gm_info __initconst = {
.has_snoop = false,
};
-static const struct intel_device_info intel_g45_info __initconst = {
+static const struct intel_device_info intel_g45_info = {
GEN4_FEATURES,
.platform = INTEL_G45,
.ring_mask = RENDER_RING | BSD_RING,
};
-static const struct intel_device_info intel_gm45_info __initconst = {
+static const struct intel_device_info intel_gm45_info = {
GEN4_FEATURES,
.platform = INTEL_GM45,
.is_mobile = 1, .has_fbc = 1,
@@ -215,12 +215,12 @@ static const struct intel_device_info intel_gm45_info __initconst = {
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
-static const struct intel_device_info intel_ironlake_d_info __initconst = {
+static const struct intel_device_info intel_ironlake_d_info = {
GEN5_FEATURES,
.platform = INTEL_IRONLAKE,
};
-static const struct intel_device_info intel_ironlake_m_info __initconst = {
+static const struct intel_device_info intel_ironlake_m_info = {
GEN5_FEATURES,
.platform = INTEL_IRONLAKE,
.is_mobile = 1, .has_fbc = 1,
@@ -243,12 +243,12 @@ static const struct intel_device_info intel_ironlake_m_info __initconst = {
GEN6_FEATURES, \
.platform = INTEL_SANDYBRIDGE
-static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = {
+static const struct intel_device_info intel_sandybridge_d_gt1_info = {
SNB_D_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = {
+static const struct intel_device_info intel_sandybridge_d_gt2_info = {
SNB_D_PLATFORM,
.gt = 2,
};
@@ -259,12 +259,12 @@ static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst =
.is_mobile = 1
-static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = {
+static const struct intel_device_info intel_sandybridge_m_gt1_info = {
SNB_M_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = {
+static const struct intel_device_info intel_sandybridge_m_gt2_info = {
SNB_M_PLATFORM,
.gt = 2,
};
@@ -288,12 +288,12 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst =
.platform = INTEL_IVYBRIDGE, \
.has_l3_dpf = 1
-static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = {
+static const struct intel_device_info intel_ivybridge_d_gt1_info = {
IVB_D_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
+static const struct intel_device_info intel_ivybridge_d_gt2_info = {
IVB_D_PLATFORM,
.gt = 2,
};
@@ -304,17 +304,17 @@ static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
.is_mobile = 1, \
.has_l3_dpf = 1
-static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = {
+static const struct intel_device_info intel_ivybridge_m_gt1_info = {
IVB_M_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_ivybridge_m_gt2_info __initconst = {
+static const struct intel_device_info intel_ivybridge_m_gt2_info = {
IVB_M_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_ivybridge_q_info __initconst = {
+static const struct intel_device_info intel_ivybridge_q_info = {
GEN7_FEATURES,
.platform = INTEL_IVYBRIDGE,
.gt = 2,
@@ -322,7 +322,7 @@ static const struct intel_device_info intel_ivybridge_q_info __initconst = {
.has_l3_dpf = 1,
};
-static const struct intel_device_info intel_valleyview_info __initconst = {
+static const struct intel_device_info intel_valleyview_info = {
.platform = INTEL_VALLEYVIEW,
.gen = 7,
.is_lp = 1,
@@ -358,17 +358,17 @@ static const struct intel_device_info intel_valleyview_info __initconst = {
.platform = INTEL_HASWELL, \
.has_l3_dpf = 1
-static const struct intel_device_info intel_haswell_gt1_info __initconst = {
+static const struct intel_device_info intel_haswell_gt1_info = {
HSW_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_haswell_gt2_info __initconst = {
+static const struct intel_device_info intel_haswell_gt2_info = {
HSW_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_haswell_gt3_info __initconst = {
+static const struct intel_device_info intel_haswell_gt3_info = {
HSW_PLATFORM,
.gt = 3,
};
@@ -388,17 +388,17 @@ static const struct intel_device_info intel_haswell_gt3_info __initconst = {
.gen = 8, \
.platform = INTEL_BROADWELL
-static const struct intel_device_info intel_broadwell_gt1_info __initconst = {
+static const struct intel_device_info intel_broadwell_gt1_info = {
BDW_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_broadwell_gt2_info __initconst = {
+static const struct intel_device_info intel_broadwell_gt2_info = {
BDW_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
+static const struct intel_device_info intel_broadwell_rsvd_info = {
BDW_PLATFORM,
.gt = 3,
/* According to the device ID those devices are GT3, they were
@@ -406,13 +406,13 @@ static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
*/
};
-static const struct intel_device_info intel_broadwell_gt3_info __initconst = {
+static const struct intel_device_info intel_broadwell_gt3_info = {
BDW_PLATFORM,
.gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
-static const struct intel_device_info intel_cherryview_info __initconst = {
+static const struct intel_device_info intel_cherryview_info = {
.gen = 8, .num_pipes = 3,
.has_hotplug = 1,
.is_lp = 1,
@@ -455,12 +455,12 @@ static const struct intel_device_info intel_cherryview_info __initconst = {
.gen = 9, \
.platform = INTEL_SKYLAKE
-static const struct intel_device_info intel_skylake_gt1_info __initconst = {
+static const struct intel_device_info intel_skylake_gt1_info = {
SKL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_skylake_gt2_info __initconst = {
+static const struct intel_device_info intel_skylake_gt2_info = {
SKL_PLATFORM,
.gt = 2,
};
@@ -470,12 +470,12 @@ static const struct intel_device_info intel_skylake_gt2_info __initconst = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
-static const struct intel_device_info intel_skylake_gt3_info __initconst = {
+static const struct intel_device_info intel_skylake_gt3_info = {
SKL_GT3_PLUS_PLATFORM,
.gt = 3,
};
-static const struct intel_device_info intel_skylake_gt4_info __initconst = {
+static const struct intel_device_info intel_skylake_gt4_info = {
SKL_GT3_PLUS_PLATFORM,
.gt = 4,
};
@@ -511,13 +511,13 @@ static const struct intel_device_info intel_skylake_gt4_info __initconst = {
IVB_CURSOR_OFFSETS, \
BDW_COLORS
-static const struct intel_device_info intel_broxton_info __initconst = {
+static const struct intel_device_info intel_broxton_info = {
GEN9_LP_FEATURES,
.platform = INTEL_BROXTON,
.ddb_size = 512,
};
-static const struct intel_device_info intel_geminilake_info __initconst = {
+static const struct intel_device_info intel_geminilake_info = {
GEN9_LP_FEATURES,
.platform = INTEL_GEMINILAKE,
.ddb_size = 1024,
@@ -529,17 +529,17 @@ static const struct intel_device_info intel_geminilake_info __initconst = {
.gen = 9, \
.platform = INTEL_KABYLAKE
-static const struct intel_device_info intel_kabylake_gt1_info __initconst = {
+static const struct intel_device_info intel_kabylake_gt1_info = {
KBL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_kabylake_gt2_info __initconst = {
+static const struct intel_device_info intel_kabylake_gt2_info = {
KBL_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
+static const struct intel_device_info intel_kabylake_gt3_info = {
KBL_PLATFORM,
.gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -550,17 +550,17 @@ static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
.gen = 9, \
.platform = INTEL_COFFEELAKE
-static const struct intel_device_info intel_coffeelake_gt1_info __initconst = {
+static const struct intel_device_info intel_coffeelake_gt1_info = {
CFL_PLATFORM,
.gt = 1,
};
-static const struct intel_device_info intel_coffeelake_gt2_info __initconst = {
+static const struct intel_device_info intel_coffeelake_gt2_info = {
CFL_PLATFORM,
.gt = 2,
};
-static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
+static const struct intel_device_info intel_coffeelake_gt3_info = {
CFL_PLATFORM,
.gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
@@ -571,7 +571,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
.ddb_size = 1024, \
GLK_COLORS
-static const struct intel_device_info intel_cannonlake_gt2_info __initconst = {
+static const struct intel_device_info intel_cannonlake_gt2_info = {
GEN10_FEATURES,
.is_alpha_support = 1,
.platform = INTEL_CANNONLAKE,
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 505c605eff98..a2108e35c599 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2489,6 +2489,8 @@ enum i915_power_well_id {
#define GEN8_FAULT_TLB_DATA0 _MMIO(0x4b10)
#define GEN8_FAULT_TLB_DATA1 _MMIO(0x4b14)
+#define FAULT_VA_HIGH_BITS (0xf << 0)
+#define FAULT_GTT_SEL (1 << 4)
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM (1<<31)
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index f1502a0188eb..522d54fecb53 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -779,7 +779,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
{
struct intel_encoder *encoder;
- if (WARN_ON(pipe >= INTEL_INFO(dev_priv)->num_pipes))
+ if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
return NULL;
/* MST */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 51108ffc28d1..f7f771749e48 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1107,6 +1107,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
}
static const u8 cnp_ddc_pin_map[] = {
+ [0] = 0, /* N/A */
[DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
[DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
[DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
@@ -1115,9 +1116,14 @@ static const u8 cnp_ddc_pin_map[] = {
static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
{
- if (HAS_PCH_CNP(dev_priv) &&
- vbt_pin > 0 && vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map))
- return cnp_ddc_pin_map[vbt_pin];
+ if (HAS_PCH_CNP(dev_priv)) {
+ if (vbt_pin < ARRAY_SIZE(cnp_ddc_pin_map)) {
+ return cnp_ddc_pin_map[vbt_pin];
+ } else {
+ DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n", vbt_pin);
+ return 0;
+ }
+ }
return vbt_pin;
}
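map_ddc_pin() now treats any out-of-range VBT pin as invalid and returns 0 instead of passing the raw value through, and the new [0] entry documents that index 0 means "no pin". A sketch of that bounds-checked table lookup, with illustrative pin values rather than the driver's real mapping:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Illustrative VBT->GMBUS mapping; real values live in intel_bios.c. */
static const uint8_t ddc_pin_map[] = {
	[0] = 0,    /* N/A */
	[1] = 0x1,  /* DDI B */
	[2] = 0x2,  /* DDI C */
	[3] = 0x4,  /* DDI D */
};

static uint8_t map_ddc_pin(uint8_t vbt_pin)
{
	if (vbt_pin < ARRAY_SIZE(ddc_pin_map))
		return ddc_pin_map[vbt_pin];
	fprintf(stderr, "Ignoring bogus VBT DDC pin %u\n", vbt_pin);
	return 0;   /* 0 == no pin; callers fall back to defaults */
}

int main(void)
{
	printf("pin 2 -> %u\n", map_ddc_pin(2));
	printf("pin 9 -> %u\n", map_ddc_pin(9));
	return 0;
}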
@@ -1323,11 +1329,13 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
} else if (bdb->version == 195) {
expected_size = 37;
- } else if (bdb->version <= 197) {
+ } else if (bdb->version <= 215) {
expected_size = 38;
+ } else if (bdb->version <= 216) {
+ expected_size = 39;
} else {
- expected_size = 38;
- BUILD_BUG_ON(sizeof(*child) < 38);
+ expected_size = sizeof(*child);
+ BUILD_BUG_ON(sizeof(*child) < 39);
DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
bdb->version, expected_size);
}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 58c624f982d9..bd40fea16b4f 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -149,17 +149,6 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
return;
mod_timer(&b->fake_irq, jiffies + 1);
-
- /* Ensure that even if the GPU hangs, we get woken up.
- *
- * However, note that if no one is waiting, we never notice
- * a gpu hang. Eventually, we will have to wait for a resource
- * held by the GPU and so trigger a hangcheck. In the most
- * pathological case, this will be upon memory starvation! To
- * prevent this, we also queue the hangcheck from the retire
- * worker.
- */
- i915_queue_hangcheck(engine->i915);
}
static void irq_enable(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index d77e2bec1e29..5dc118f26b51 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1370,10 +1370,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
break;
}
- /* Inform power controller of upcoming frequency change */
+ /*
+ * Inform power controller of upcoming frequency change. BSpec
+ * requires us to wait up to 150usec, but that leads to timeouts;
+ * the 2ms used here is based on experiment.
+ */
mutex_lock(&dev_priv->pcu_lock);
- ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000);
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 2000);
mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
@@ -1404,8 +1409,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
I915_WRITE(CDCLK_CTL, val);
mutex_lock(&dev_priv->pcu_lock);
- ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_state->voltage_level);
+ /*
+ * The timeout isn't specified, the 2ms used here is based on
+ * experiment.
+ * FIXME: Waiting for the request completion could be delayed until
+ * the next PCODE request based on BSpec.
+ */
+ ret = sandybridge_pcode_write_timeout(dev_priv,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_state->voltage_level, 2000);
mutex_unlock(&dev_priv->pcu_lock);
if (ret) {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 0cd355978ab4..f288bcc7be22 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5661,8 +5661,8 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
if (!crtc_state->base.active)
return 0;
- mask = BIT(POWER_DOMAIN_PIPE(pipe));
- mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+ mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
+ mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
if (crtc_state->pch_pfit.enabled ||
crtc_state->pch_pfit.force_thru)
mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
@@ -5674,7 +5674,7 @@ static u64 get_crtc_power_domains(struct drm_crtc *crtc,
}
if (HAS_DDI(dev_priv) && crtc_state->has_audio)
- mask |= BIT(POWER_DOMAIN_AUDIO);
+ mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
if (crtc_state->shared_dpll)
mask |= BIT_ULL(POWER_DOMAIN_PLLS);
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 05907fa8a553..cf8fef8b6f58 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -328,14 +328,22 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
return;
failure_handling:
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
- intel_connector->base.base.id,
- intel_connector->base.name,
- intel_dp->link_rate, intel_dp->lane_count);
- if (!intel_dp_get_link_train_fallback_values(intel_dp,
- intel_dp->link_rate,
- intel_dp->lane_count))
- /* Schedule a Hotplug Uevent to userspace to start modeset */
- schedule_work(&intel_connector->modeset_retry_work);
+ /* Don't fall back and prune modes if it's eDP */
+ if (!intel_dp_is_edp(intel_dp)) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
+ intel_dp->link_rate, intel_dp->lane_count);
+ if (!intel_dp_get_link_train_fallback_values(intel_dp,
+ intel_dp->link_rate,
+ intel_dp->lane_count))
+ /* Schedule a Hotplug Uevent to userspace to start modeset */
+ schedule_work(&intel_connector->modeset_retry_work);
+ } else {
+ DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
+ intel_dp->link_rate, intel_dp->lane_count);
+ }
return;
}
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 6bb51a502b8b..d790bdc227ff 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1951,8 +1951,22 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
spin_lock_irqsave(&engine->stats.lock, flags);
if (engine->stats.enabled == ~0)
goto busy;
- if (engine->stats.enabled++ == 0)
+ if (engine->stats.enabled++ == 0) {
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ const struct execlist_port *port = execlists->port;
+ unsigned int num_ports = execlists_num_ports(execlists);
+
engine->stats.enabled_at = ktime_get();
+
+ /* XXX submission method oblivious? */
+ while (num_ports-- && port_isset(port)) {
+ engine->stats.active++;
+ port++;
+ }
+
+ if (engine->stats.active)
+ engine->stats.start = engine->stats.enabled_at;
+ }
spin_unlock_irqrestore(&engine->stats.lock, flags);
return 0;
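intel_enable_engine_stats() now seeds the busy counter from the execlist ports that are already occupied when accounting is switched on, so a context running at enable time is billed from enabled_at instead of being missed. A sketch of that seeding, with the ports reduced to a boolean array:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define NUM_PORTS 2

struct stats {
	int enabled;
	int active;                 /* contexts currently on hardware */
	time_t enabled_at, start;
};

static void enable_stats(struct stats *s, const bool port_isset[NUM_PORTS])
{
	if (s->enabled++ == 0) {
		int i;

		s->enabled_at = time(NULL);
		/* Count whatever is already executing... */
		for (i = 0; i < NUM_PORTS && port_isset[i]; i++)
			s->active++;
		/* ...and start billing it from the enable timestamp. */
		if (s->active)
			s->start = s->enabled_at;
	}
}

int main(void)
{
	struct stats s = {0};
	bool ports[NUM_PORTS] = { true, false }; /* one request in flight */

	enable_stats(&s, ports);
	printf("active=%d\n", s.active);
	return 0;
}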
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index cbc51c960425..3b0932942857 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -39,9 +39,6 @@
#define KBL_FW_MAJOR 9
#define KBL_FW_MINOR 39
-#define GLK_FW_MAJOR 10
-#define GLK_FW_MINOR 56
-
#define GUC_FW_PATH(platform, major, minor) \
"i915/" __stringify(platform) "_guc_ver" __stringify(major) "_" __stringify(minor) ".bin"
@@ -54,8 +51,6 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
-#define I915_GLK_GUC_UCODE GUC_FW_PATH(glk, GLK_FW_MAJOR, GLK_FW_MINOR)
-
static void guc_fw_select(struct intel_uc_fw *guc_fw)
{
struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
@@ -82,10 +77,6 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
guc_fw->path = I915_KBL_GUC_UCODE;
guc_fw->major_ver_wanted = KBL_FW_MAJOR;
guc_fw->minor_ver_wanted = KBL_FW_MINOR;
- } else if (IS_GEMINILAKE(dev_priv)) {
- guc_fw->path = I915_GLK_GUC_UCODE;
- guc_fw->major_ver_wanted = GLK_FW_MAJOR;
- guc_fw->minor_ver_wanted = GLK_FW_MINOR;
} else {
DRM_WARN("%s: No firmware known for this platform!\n",
intel_uc_fw_type_repr(guc_fw->type));
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 31f01d64c021..348a4f7ffb67 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -411,7 +411,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int hung = 0, stuck = 0;
- int busy_count = 0;
if (!i915_modparams.enable_hangcheck)
return;
@@ -429,7 +428,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
for_each_engine(engine, dev_priv, id) {
- const bool busy = intel_engine_has_waiter(engine);
struct intel_engine_hangcheck hc;
semaphore_clear_deadlocks(dev_priv);
@@ -443,16 +441,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (hc.action != ENGINE_DEAD)
stuck |= intel_engine_flag(engine);
}
-
- busy_count += busy;
}
if (hung)
hangcheck_declare_hang(dev_priv, hung, stuck);
/* Reset timer in case GPU hangs without another request being added */
- if (busy_count)
- i915_queue_hangcheck(dev_priv);
+ i915_queue_hangcheck(dev_priv);
}
void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index bced7b954d93..179d0ad3889d 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1595,12 +1595,20 @@ intel_hdmi_set_edid(struct drm_connector *connector)
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct edid *edid;
bool connected = false;
+ struct i2c_adapter *i2c;
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- edid = drm_get_edid(connector,
- intel_gmbus_get_adapter(dev_priv,
- intel_hdmi->ddc_bus));
+ i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
+
+ edid = drm_get_edid(connector, i2c);
+
+ if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ DRM_DEBUG_KMS("HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
+ intel_gmbus_force_bit(i2c, true);
+ edid = drm_get_edid(connector, i2c);
+ intel_gmbus_force_bit(i2c, false);
+ }
intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
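The EDID hunk retries a failed GMBUS read exactly once with GPIO bit-banging forced on, then restores the fast path. The shape of that fallback, with the transfer reduced to a stub that only succeeds when bit-banging is enabled (purely illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool force_bit;   /* bit-banging toggle */

static const char *get_edid(void)
{
	/* Stub: pretend the fast GMBUS path fails on this monitor. */
	return force_bit ? "EDID" : NULL;
}

int main(void)
{
	const char *edid = get_edid();

	if (!edid && !force_bit) {
		fprintf(stderr, "GMBUS read failed, retrying bit-banged\n");
		force_bit = true;    /* slow but more forgiving path */
		edid = get_edid();
		force_bit = false;   /* restore the fast path */
	}

	printf("edid: %s\n", edid ? edid : "(none)");
	return 0;
}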
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 974be3defa70..8ed05182f944 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -54,10 +54,6 @@
#define KBL_HUC_FW_MINOR 00
#define KBL_BLD_NUM 1810
-#define GLK_HUC_FW_MAJOR 02
-#define GLK_HUC_FW_MINOR 00
-#define GLK_BLD_NUM 1748
-
#define HUC_FW_PATH(platform, major, minor, bld_num) \
"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
__stringify(minor) "_" __stringify(bld_num) ".bin"
@@ -74,9 +70,6 @@ MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
KBL_HUC_FW_MINOR, KBL_BLD_NUM)
MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
-#define I915_GLK_HUC_UCODE HUC_FW_PATH(glk, GLK_HUC_FW_MAJOR, \
- GLK_HUC_FW_MINOR, GLK_BLD_NUM)
-
static void huc_fw_select(struct intel_uc_fw *huc_fw)
{
struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
@@ -103,10 +96,6 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw)
huc_fw->path = I915_KBL_HUC_UCODE;
huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
- } else if (IS_GEMINILAKE(dev_priv)) {
- huc_fw->path = I915_GLK_HUC_UCODE;
- huc_fw->major_ver_wanted = GLK_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = GLK_HUC_FW_MINOR;
} else {
DRM_WARN("%s: No firmware known for this platform!\n",
intel_uc_fw_type_repr(huc_fw->type));
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1db79a860b96..1a6e699e19e0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -9149,8 +9149,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
return 0;
}
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
- u32 mbox, u32 val)
+int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
+ u32 mbox, u32 val, int timeout_us)
{
int status;
@@ -9173,7 +9173,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
if (__intel_wait_for_register_fw(dev_priv,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
- 500, 0, NULL)) {
+ timeout_us, 0, NULL)) {
DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
val, mbox, __builtin_return_address(0));
return -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index 907deac6e3fa..d82ca0f438f5 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -209,8 +209,6 @@ void intel_uc_fini_wq(struct drm_i915_private *dev_priv)
if (!USES_GUC(dev_priv))
return;
- GEM_BUG_ON(!HAS_GUC(dev_priv));
-
intel_guc_fini_wq(&dev_priv->guc);
}
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index e3d7745a9151..98dff6058d3c 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -412,6 +412,8 @@ struct child_device_config {
u16 dp_gpio_pin_num; /* 195 */
u8 dp_iboost_level:4; /* 196 */
u8 hdmi_iboost_level:4; /* 196 */
+ u8 dp_max_link_rate:2; /* 216 CNL+ */
+ u8 dp_max_link_rate_reserved:6; /* 216 */
} __packed;
struct bdb_general_definitions {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index adb78f7d083a..92be0e5269c6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -75,6 +75,7 @@ int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gf108_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gk110_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
index 59f3ba551681..b57fe4ae93ba 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -60,6 +60,7 @@ int nvkm_secboot_reset(struct nvkm_secboot *, unsigned long);
int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+int gp108_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gp10b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
index b1ac47eb786e..9398d9f09339 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h
@@ -46,6 +46,16 @@ enum nvkm_therm_attr_type {
NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST = 17,
};
+struct nvkm_therm_clkgate_init {
+ u32 addr;
+ u8 count;
+ u32 data;
+};
+
+struct nvkm_therm_clkgate_pack {
+ const struct nvkm_therm_clkgate_init *init;
+};
+
struct nvkm_therm {
const struct nvkm_therm_func *func;
struct nvkm_subdev subdev;
@@ -85,17 +95,24 @@ struct nvkm_therm {
int (*attr_get)(struct nvkm_therm *, enum nvkm_therm_attr_type);
int (*attr_set)(struct nvkm_therm *, enum nvkm_therm_attr_type, int);
+
+ bool clkgating_enabled;
};
int nvkm_therm_temp_get(struct nvkm_therm *);
int nvkm_therm_fan_sense(struct nvkm_therm *);
int nvkm_therm_cstate(struct nvkm_therm *, int, int);
+void nvkm_therm_clkgate_init(struct nvkm_therm *,
+ const struct nvkm_therm_clkgate_pack *);
+void nvkm_therm_clkgate_enable(struct nvkm_therm *);
+void nvkm_therm_clkgate_fini(struct nvkm_therm *, bool);
int nv40_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int nv50_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int g84_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gt215_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gf119_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
+int gk104_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gm107_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gm200_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
int gp100_therm_new(struct nvkm_device *, int, struct nvkm_therm **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 7b5cc5c73d20..be8e00b49cde 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -105,4 +105,32 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
return ioptr;
}
+static inline void
+nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo)
+{
+ if (*pnvbo) {
+ nouveau_bo_unmap(*pnvbo);
+ nouveau_bo_unpin(*pnvbo);
+ nouveau_bo_ref(NULL, pnvbo);
+ }
+}
+
+static inline int
+nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
+ struct nouveau_bo **pnvbo)
+{
+ int ret = nouveau_bo_new(cli, size, align, flags,
+ 0, 0, NULL, NULL, pnvbo);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(*pnvbo, flags, true);
+ if (ret == 0) {
+ ret = nouveau_bo_map(*pnvbo);
+ if (ret == 0)
+ return ret;
+ nouveau_bo_unpin(*pnvbo);
+ }
+ nouveau_bo_ref(NULL, pnvbo);
+ }
+ return ret;
+}
#endif
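nouveau_bo_new_pin_map() above folds a three-step acquire (new, pin, map) with precise unwinding into one helper, and nouveau_bo_unmap_unpin_unref() is its mirror. A sketch of that staged-unwind idiom, with malloc-based stand-ins for the three stages:

#include <stdio.h>
#include <stdlib.h>

struct bo { int pinned, mapped; };

static struct bo *bo_new(void)     { return calloc(1, sizeof(struct bo)); }
static int bo_pin(struct bo *b)    { b->pinned = 1; return 0; }
static int bo_map(struct bo *b)    { b->mapped = 1; return 0; }
static void bo_unpin(struct bo *b) { b->pinned = 0; }
static void bo_free(struct bo **b) { free(*b); *b = NULL; }

/* Acquire all three stages; on failure, release exactly what we took. */
static int bo_new_pin_map(struct bo **pbo)
{
	int ret = -1;

	*pbo = bo_new();
	if (*pbo) {
		ret = bo_pin(*pbo);
		if (ret == 0) {
			ret = bo_map(*pbo);
			if (ret == 0)
				return 0;   /* fully acquired */
			bo_unpin(*pbo);     /* map failed: undo pin */
		}
		bo_free(pbo);               /* pin or map failed */
	}
	return ret;
}

int main(void)
{
	struct bo *bo = NULL;

	if (bo_new_pin_map(&bo) == 0)
		printf("pinned=%d mapped=%d\n", bo->pinned, bo->mapped);
	bo_free(&bo);
	return 0;
}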
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index b7a18fbee6dc..366acb928f57 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -60,7 +60,6 @@ struct nouveau_crtc {
} cursor;
struct {
- struct nouveau_bo *nvbo;
int depth;
} lut;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index ee5d1dc2eaf5..85c1f10bc2b6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -56,6 +56,10 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)");
+static int nouveau_fbcon_bpp;
+module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400);
+
static void
nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
@@ -488,7 +492,7 @@ nouveau_fbcon_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_fbdev *fbcon;
- int preferred_bpp;
+ int preferred_bpp = nouveau_fbcon_bpp;
int ret;
if (!dev->mode_config.num_crtc ||
@@ -512,13 +516,15 @@ nouveau_fbcon_init(struct drm_device *dev)
if (ret)
goto fini;
- if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
- preferred_bpp = 8;
- else
- if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
- preferred_bpp = 16;
- else
- preferred_bpp = 32;
+ if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
+ if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
+ preferred_bpp = 8;
+ else
+ if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
+ preferred_bpp = 16;
+ else
+ preferred_bpp = 32;
+ }
/* disable all the possible outputs/crtcs before entering KMS mode */
if (!drm_drv_uses_atomic_modeset(dev))
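The fbcon change adds an fbcon_bpp module parameter; any value other than 8, 16 or 32 falls through to the old RAM-size heuristic. A sketch of that validate-or-fallback selection:

#include <stdio.h>

static int pick_bpp(int requested, unsigned long ram_mib)
{
	/* Honor the override only if it's a depth we can program. */
	if (requested == 8 || requested == 16 || requested == 32)
		return requested;

	/* Otherwise keep the historic RAM-size heuristic. */
	if (ram_mib <= 32)
		return 8;
	if (ram_mib <= 64)
		return 16;
	return 32;
}

int main(void)
{
	printf("%d\n", pick_bpp(0, 48));    /* no override -> 16 */
	printf("%d\n", pick_bpp(24, 48));   /* invalid -> heuristic 16 */
	printf("%d\n", pick_bpp(8, 4096));  /* explicit 8 honored */
	return 0;
}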
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index b22c37bde13f..dd8d4352ed99 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -137,8 +137,10 @@ struct nv50_head_atom {
} mode;
struct {
+ bool visible;
u32 handle;
u64 offset:40;
+ u8 mode:4;
} lut;
struct {
@@ -192,6 +194,7 @@ struct nv50_head_atom {
union {
struct {
+ bool ilut:1;
bool core:1;
bool curs:1;
};
@@ -200,6 +203,7 @@ struct nv50_head_atom {
union {
struct {
+ bool ilut:1;
bool core:1;
bool curs:1;
bool view:1;
@@ -660,6 +664,10 @@ nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
struct nv50_head {
struct nouveau_crtc base;
+ struct {
+ struct nouveau_bo *nvbo[2];
+ int next;
+ } lut;
struct nv50_ovly ovly;
struct nv50_oimm oimm;
};
@@ -1795,6 +1803,54 @@ nv50_head_lut_clr(struct nv50_head *head)
}
static void
+nv50_head_lut_load(struct drm_property_blob *blob, int mode,
+ struct nouveau_bo *nvbo)
+{
+ struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
+ void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
+ const int size = blob->length / sizeof(*in);
+ int bits, shift, i;
+ u16 zero, r, g, b;
+
+ /* This can't happen, but it shuts the compiler up. */
+ if (WARN_ON(size != 256))
+ return;
+
+ switch (mode) {
+ case 0: /* LORES. */
+ case 1: /* HIRES. */
+ bits = 11;
+ shift = 3;
+ zero = 0x0000;
+ break;
+ case 7: /* INTERPOLATE_257_UNITY_RANGE. */
+ bits = 14;
+ shift = 0;
+ zero = 0x6000;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ for (i = 0; i < size; i++) {
+ r = (drm_color_lut_extract(in[i]. red, bits) + zero) << shift;
+ g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
+ b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
+ writew(r, lut + (i * 0x08) + 0);
+ writew(g, lut + (i * 0x08) + 2);
+ writew(b, lut + (i * 0x08) + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(r, lut + (i * 0x08) + 0);
+ writew(g, lut + (i * 0x08) + 2);
+ writew(b, lut + (i * 0x08) + 4);
+}
+
+static void
nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
@@ -1802,18 +1858,18 @@ nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
if ((push = evo_wait(core, 7))) {
if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
- evo_data(push, 0xc0000000);
+ evo_data(push, 0x80000000 | asyh->lut.mode << 30);
evo_data(push, asyh->lut.offset >> 8);
} else
if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
- evo_data(push, 0xc0000000);
+ evo_data(push, 0x80000000 | asyh->lut.mode << 30);
evo_data(push, asyh->lut.offset >> 8);
evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
evo_data(push, asyh->lut.handle);
} else {
evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
- evo_data(push, 0x83000000);
+ evo_data(push, 0x80000000 | asyh->lut.mode << 24);
evo_data(push, asyh->lut.offset >> 8);
evo_data(push, 0x00000000);
evo_data(push, 0x00000000);
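nv50_head_lut_load() above packs each gamma entry into interleaved 16-bit R/G/B words, with a mode-dependent precision, bias and shift, and duplicates the final entry because the INTERPOLATE modes read one entry past the end. A sketch of the packing arithmetic for the two mode families, with the extract helper reimplemented inline (it mirrors drm_color_lut_extract() for the sub-16-bit precisions used here):

#include <stdint.h>
#include <stdio.h>

/* Scale a 16-bit channel down to 'bits' bits, with rounding. */
static uint16_t lut_extract(uint16_t in, int bits)
{
	uint32_t val = in, max = 0xffffu >> (16 - bits);

	val += 1u << (16 - bits - 1);
	val >>= 16 - bits;
	return val > max ? max : val;
}

int main(void)
{
	/* mode 0/1 (LORES/HIRES): 11 bits, shifted up 3, no bias;
	 * mode 7 (INTERPOLATE_257_UNITY_RANGE): 14 bits, 0x6000 bias. */
	struct { int bits, shift; uint16_t zero; } m[] = {
		{ 11, 3, 0x0000 }, { 14, 0, 0x6000 },
	};
	uint16_t in = 0x8000;   /* mid-scale channel value */
	int i;

	for (i = 0; i < 2; i++) {
		uint16_t w = (lut_extract(in, m[i].bits) + m[i].zero)
			     << m[i].shift;
		printf("mode %d: 0x%04x\n", i ? 7 : 0, w);
	}
	return 0;
}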
@@ -1896,7 +1952,7 @@ nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
static void
nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
{
- if (asyh->clr.core && (!asyh->set.core || y))
+ if (asyh->clr.ilut && (!asyh->set.ilut || y))
nv50_head_lut_clr(head);
if (asyh->clr.core && (!asyh->set.core || y))
nv50_head_core_clr(head);
@@ -1909,7 +1965,15 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
if (asyh->set.view ) nv50_head_view (head, asyh);
if (asyh->set.mode ) nv50_head_mode (head, asyh);
- if (asyh->set.core ) nv50_head_lut_set (head, asyh);
+ if (asyh->set.ilut ) {
+ struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
+ struct drm_property_blob *blob = asyh->state.gamma_lut;
+ if (blob)
+ nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
+ asyh->lut.offset = nvbo->bo.offset;
+ head->lut.next ^= 1;
+ nv50_head_lut_set(head, asyh);
+ }
if (asyh->set.core ) nv50_head_core_set(head, asyh);
if (asyh->set.curs ) nv50_head_curs_set(head, asyh);
if (asyh->set.base ) nv50_head_base (head, asyh);
@@ -2044,6 +2108,37 @@ nv50_head_atomic_check_view(struct nv50_head_atom *armh,
}
static void
+nv50_head_atomic_check_lut(struct nv50_head *head,
+ struct nv50_head_atom *armh,
+ struct nv50_head_atom *asyh)
+{
+ struct nv50_disp *disp = nv50_disp(head->base.base.dev);
+
+ /* An I8 surface without an input LUT makes no sense, and
+ * EVO will throw an error if you try.
+ *
+ * Legacy clients actually cause this due to the order in
+ * which they call ioctls, so we will enable the LUT with
+ * whatever contents the buffer already contains to avoid
+ * triggering the error check.
+ */
+ if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
+ asyh->lut.handle = 0;
+ asyh->clr.ilut = armh->lut.visible;
+ return;
+ }
+
+ if (disp->disp->oclass < GF110_DISP) {
+ asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
+ asyh->set.ilut = true;
+ } else {
+ asyh->lut.mode = 7;
+ asyh->set.ilut = asyh->state.color_mgmt_changed;
+ }
+ asyh->lut.handle = disp->mast.base.vram.handle;
+}
+
+static void
nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
@@ -2128,6 +2223,11 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
if (asyh->state.mode_changed)
nv50_head_atomic_check_mode(head, asyh);
+ if (asyh->state.color_mgmt_changed ||
+ asyh->base.cpp != armh->base.cpp)
+ nv50_head_atomic_check_lut(head, armh, asyh);
+ asyh->lut.visible = asyh->lut.handle != 0;
+
if (asyc) {
if (asyc->set.scaler)
nv50_head_atomic_check_view(armh, asyh, asyc);
@@ -2143,7 +2243,8 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
asyh->core.w = asyh->base.w;
asyh->core.h = asyh->base.h;
} else
- if ((asyh->core.visible = asyh->curs.visible)) {
+ if ((asyh->core.visible = asyh->curs.visible) ||
+ (asyh->core.visible = asyh->lut.visible)) {
/*XXX: We need to either find some way of having the
* primary base layer appear black, while still
* being able to display the other layers, or we
@@ -2161,11 +2262,10 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
asyh->core.layout = 1;
asyh->core.block = 0;
asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
- asyh->lut.handle = disp->mast.base.vram.handle;
- asyh->lut.offset = head->base.lut.nvbo->bo.offset;
asyh->set.base = armh->base.cpp != asyh->base.cpp;
asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
} else {
+ asyh->lut.visible = false;
asyh->core.visible = false;
asyh->curs.visible = false;
asyh->base.cpp = 0;
@@ -2189,8 +2289,10 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
asyh->clr.curs = true;
}
} else {
+ asyh->clr.ilut = armh->lut.visible;
asyh->clr.core = armh->core.visible;
asyh->clr.curs = armh->curs.visible;
+ asyh->set.ilut = asyh->lut.visible;
asyh->set.core = asyh->core.visible;
asyh->set.curs = asyh->curs.visible;
}
@@ -2200,47 +2302,11 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
return 0;
}
-static void
-nv50_head_lut_load(struct drm_crtc *crtc)
-{
- struct nv50_disp *disp = nv50_disp(crtc->dev);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
- u16 *r, *g, *b;
- int i;
-
- r = crtc->gamma_store;
- g = r + crtc->gamma_size;
- b = g + crtc->gamma_size;
-
- for (i = 0; i < 256; i++) {
- if (disp->disp->oclass < GF110_DISP) {
- writew((*r++ >> 2) + 0x0000, lut + (i * 0x08) + 0);
- writew((*g++ >> 2) + 0x0000, lut + (i * 0x08) + 2);
- writew((*b++ >> 2) + 0x0000, lut + (i * 0x08) + 4);
- } else {
- /* 0x6000 interferes with the 14-bit color??? */
- writew((*r++ >> 2) + 0x6000, lut + (i * 0x20) + 0);
- writew((*g++ >> 2) + 0x6000, lut + (i * 0x20) + 2);
- writew((*b++ >> 2) + 0x6000, lut + (i * 0x20) + 4);
- }
- }
-}
-
static const struct drm_crtc_helper_funcs
nv50_head_help = {
.atomic_check = nv50_head_atomic_check,
};
-static int
-nv50_head_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- nv50_head_lut_load(crtc);
- return 0;
-}
-
static void
nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
@@ -2296,17 +2362,15 @@ nv50_head_reset(struct drm_crtc *crtc)
static void
nv50_head_destroy(struct drm_crtc *crtc)
{
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nv50_disp *disp = nv50_disp(crtc->dev);
struct nv50_head *head = nv50_head(crtc);
+ int i;
nv50_dmac_destroy(&head->ovly.base, disp->disp);
nv50_pioc_destroy(&head->oimm.base);
- nouveau_bo_unmap(nv_crtc->lut.nvbo);
- if (nv_crtc->lut.nvbo)
- nouveau_bo_unpin(nv_crtc->lut.nvbo);
- nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+ for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
+ nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);
drm_crtc_cleanup(crtc);
kfree(crtc);
@@ -2315,7 +2379,7 @@ nv50_head_destroy(struct drm_crtc *crtc)
static const struct drm_crtc_funcs
nv50_head_func = {
.reset = nv50_head_reset,
- .gamma_set = nv50_head_gamma_set,
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = nv50_head_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -2333,7 +2397,7 @@ nv50_head_create(struct drm_device *dev, int index)
struct nv50_base *base;
struct nv50_curs *curs;
struct drm_crtc *crtc;
- int ret;
+ int ret, i;
head = kzalloc(sizeof(*head), GFP_KERNEL);
if (!head)
@@ -2355,22 +2419,14 @@ nv50_head_create(struct drm_device *dev, int index)
drm_crtc_helper_add(crtc, &nv50_head_help);
drm_mode_crtc_set_gamma_size(crtc, 256);
- ret = nouveau_bo_new(&drm->client, 8192, 0x100, TTM_PL_FLAG_VRAM,
- 0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
- if (!ret) {
- ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true);
- if (!ret) {
- ret = nouveau_bo_map(head->base.lut.nvbo);
- if (ret)
- nouveau_bo_unpin(head->base.lut.nvbo);
- }
+ for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) {
+ ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
+ TTM_PL_FLAG_VRAM,
+ &head->lut.nvbo[i]);
if (ret)
- nouveau_bo_ref(NULL, &head->base.lut.nvbo);
+ goto out;
}
- if (ret)
- goto out;
-
/* allocate overlay resources */
ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
if (ret)
@@ -4350,7 +4406,6 @@ nv50_display_init(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct drm_plane *plane;
- struct drm_crtc *crtc;
u32 *push;
push = evo_wait(nv50_mast(dev), 32);
@@ -4369,10 +4424,6 @@ nv50_display_init(struct drm_device *dev)
}
}
- drm_for_each_crtc(crtc, dev) {
- nv50_head_lut_load(crtc);
- }
-
drm_for_each_plane(plane, dev) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (plane->funcs != &nv50_wndw)
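
The removed ladder in nv50_head_create() (new, then pin, then map, unwinding each step on failure) is what the new nouveau_bo_new_pin_map() and nouveau_bo_unmap_unpin_unref() helpers fold together. A minimal user-space sketch of that consolidation, using stand-in types and function names rather than the real nouveau API:

#include <stdio.h>
#include <stdlib.h>

struct buf {
	void *mem;     /* stands in for the allocated BO       */
	int   pinned;  /* stands in for the VRAM pin           */
	void *map;     /* stands in for the kernel CPU mapping */
};

static int buf_new(struct buf *b, size_t size)
{
	b->mem = malloc(size);
	return b->mem ? 0 : -1;
}

static int buf_pin(struct buf *b) { b->pinned = 1; return 0; }
static int buf_map(struct buf *b) { b->map = b->mem; return 0; }

/* One call that either fully succeeds or leaves *b empty, mirroring
 * the unwind order of the open-coded version removed above. */
static int buf_new_pin_map(struct buf *b, size_t size)
{
	int ret = buf_new(b, size);

	if (ret)
		return ret;
	ret = buf_pin(b);
	if (!ret) {
		ret = buf_map(b);
		if (ret)
			b->pinned = 0;   /* unpin if mapping failed */
	}
	if (ret) {
		free(b->mem);            /* drop the reference on failure */
		b->mem = NULL;
	}
	return ret;
}

int main(void)
{
	struct buf lut = { 0 };

	if (buf_new_pin_map(&lut, 1025 * 8))
		return 1;
	printf("buffer ready at %p\n", lut.map);
	free(lut.mem);
	return 0;
}

Callers are then left with a single error check per buffer, as in the new loop over head->lut.nvbo[].
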
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 08e77cd55e6e..05cd674326a6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -28,6 +28,7 @@
#include <core/option.h>
#include <subdev/bios.h>
+#include <subdev/therm.h>
static DEFINE_MUTEX(nv_devices_mutex);
static LIST_HEAD(nv_devices);
@@ -1682,7 +1683,7 @@ nve4_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -1721,7 +1722,7 @@ nve6_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -1760,7 +1761,7 @@ nve7_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk104_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -1811,7 +1812,7 @@ nvf0_chipset = {
.bus = gf100_bus_new,
.clk = gk104_clk_new,
.devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
+ .fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
@@ -1824,7 +1825,7 @@ nvf0_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk110_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -1849,7 +1850,7 @@ nvf1_chipset = {
.bus = gf100_bus_new,
.clk = gk104_clk_new,
.devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
+ .fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
@@ -1862,7 +1863,7 @@ nvf1_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk110_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -1887,7 +1888,7 @@ nv106_chipset = {
.bus = gf100_bus_new,
.clk = gk104_clk_new,
.devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
+ .fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
@@ -1900,7 +1901,7 @@ nv106_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk208_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -1925,7 +1926,7 @@ nv108_chipset = {
.bus = gf100_bus_new,
.clk = gk104_clk_new,
.devinit = gf100_devinit_new,
- .fb = gk104_fb_new,
+ .fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
@@ -1938,7 +1939,7 @@ nv108_chipset = {
.mxm = nv50_mxm_new,
.pci = gk104_pci_new,
.pmu = gk208_pmu_new,
- .therm = gf119_therm_new,
+ .therm = gk104_therm_new,
.timer = nv41_timer_new,
.top = gk104_top_new,
.volt = gk104_volt_new,
@@ -2345,6 +2346,7 @@ nv138_chipset = {
.mc = gp100_mc_new,
.mmu = gp100_mmu_new,
.therm = gp100_therm_new,
+ .secboot = gp108_secboot_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2356,6 +2358,10 @@ nv138_chipset = {
.disp = gp102_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
+ .gr = gp107_gr_new,
+ .nvdec = gp102_nvdec_new,
+ .sec2 = gp102_sec2_new,
+ .sw = gf100_sw_new,
};
static const struct nvkm_device_chip
@@ -2508,6 +2514,7 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
}
}
+ nvkm_therm_clkgate_fini(device->therm, suspend);
if (device->func->fini)
device->func->fini(device, suspend);
@@ -2597,6 +2604,7 @@ nvkm_device_init(struct nvkm_device *device)
}
nvkm_acpi_init(device);
+ nvkm_therm_clkgate_enable(device->therm);
time = ktime_to_us(ktime_get()) - time;
nvdev_trace(device, "init completed in %lldus\n", time);
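
Each nvkm chipset is described by a table of per-subdev constructor pointers, which is why the hunks above are one-line swaps: nvf0/nvf1/nv106/nv108 move to gk110_fb_new and gk104_therm_new without touching any shared code. A toy model of the pattern (the struct layout and names here are illustrative, not the nvkm definitions):

#include <stdio.h>

struct device;                       /* opaque in this sketch */
typedef int (*ctor_fn)(struct device *);

static int gk110_fb_ctor(struct device *d)    { (void)d; puts("gk110 fb");    return 0; }
static int gk104_therm_ctor(struct device *d) { (void)d; puts("gk104 therm"); return 0; }

struct chipset {
	const char *name;
	ctor_fn fb;
	ctor_fn therm;
};

static const struct chipset nvf0 = {
	.name  = "nvf0",
	.fb    = gk110_fb_ctor,      /* previously the gk104 constructor */
	.therm = gk104_therm_ctor,
};

int main(void)
{
	printf("probing %s\n", nvf0.name);
	nvf0.fb(NULL);
	nvf0.therm(NULL);
	return 0;
}
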
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index d7c2adb9b543..c8ec3fd97155 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -137,6 +137,7 @@ struct gf100_gr_func {
int (*rops)(struct gf100_gr *);
int ppc_nr;
const struct gf100_grctx_func *grctx;
+ const struct nvkm_therm_clkgate_pack *clkgate_pack;
struct nvkm_sclass sclass[];
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
index 5e82f94c2245..1b52fcb2c49a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "gf100.h"
+#include "gk104.h"
#include "ctxgf100.h"
#include <nvif/class.h>
@@ -173,6 +174,208 @@ gk104_gr_pack_mmio[] = {
{}
};
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_main_0[] = {
+ { 0x4041f0, 1, 0x00004046 },
+ { 0x409890, 1, 0x00000045 },
+ { 0x4098b0, 1, 0x0000007f },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_rstr2d_0[] = {
+ { 0x4078c0, 1, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_unk_0[] = {
+ { 0x406000, 1, 0x00004044 },
+ { 0x405860, 1, 0x00004042 },
+ { 0x40590c, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gcc_0[] = {
+ { 0x408040, 1, 0x00004044 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_sked_0[] = {
+ { 0x407000, 1, 0x00004044 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_unk_1[] = {
+ { 0x405bf0, 1, 0x00004044 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_ctxctl_0[] = {
+ { 0x41a890, 1, 0x00000042 },
+ { 0x41a8b0, 1, 0x0000007f },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_unk_0[] = {
+ { 0x418500, 1, 0x00004042 },
+ { 0x418608, 1, 0x00004042 },
+ { 0x418688, 1, 0x00004042 },
+ { 0x418718, 1, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_esetup_0[] = {
+ { 0x418828, 1, 0x00000044 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_tpbus_0[] = {
+ { 0x418bbc, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_zcull_0[] = {
+ { 0x418970, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_tpconf_0[] = {
+ { 0x418c70, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_unk_1[] = {
+ { 0x418cf0, 1, 0x00004042 },
+ { 0x418d70, 1, 0x00004042 },
+ { 0x418f0c, 1, 0x00004042 },
+ { 0x418e0c, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_gcc_0[] = {
+ { 0x419020, 1, 0x00004042 },
+ { 0x419038, 1, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_ffb_0[] = {
+ { 0x418898, 1, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_tex_0[] = {
+ { 0x419a40, 9, 0x00004042 },
+ { 0x419acc, 1, 0x00004047 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_poly_0[] = {
+ { 0x419868, 1, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_l1c_0[] = {
+ { 0x419ccc, 3, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_unk_2[] = {
+ { 0x419c70, 1, 0x00004045 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_mp_0[] = {
+ { 0x419fd0, 1, 0x00004043 },
+ { 0x419fd8, 1, 0x00004049 },
+ { 0x419fe0, 2, 0x00004042 },
+ { 0x419ff0, 1, 0x00004046 },
+ { 0x419ff8, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_gpc_ppc_0[] = {
+ { 0x41be28, 1, 0x00000042 },
+ { 0x41bfe8, 1, 0x00004042 },
+ { 0x41bed0, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_rop_zrop_0[] = {
+ { 0x408810, 2, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_rop_0[] = {
+ { 0x408a80, 6, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_rop_crop_0[] = {
+ { 0x4089a8, 1, 0x00004042 },
+ { 0x4089b0, 1, 0x00000042 },
+ { 0x4089b8, 1, 0x00004042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_clkgate_blcg_init_pxbar_0[] = {
+ { 0x13c820, 1, 0x0001007f },
+ { 0x13cbe0, 1, 0x00000042 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_pack
+gk104_clkgate_pack[] = {
+ { gk104_clkgate_blcg_init_main_0 },
+ { gk104_clkgate_blcg_init_rstr2d_0 },
+ { gk104_clkgate_blcg_init_unk_0 },
+ { gk104_clkgate_blcg_init_gcc_0 },
+ { gk104_clkgate_blcg_init_sked_0 },
+ { gk104_clkgate_blcg_init_unk_1 },
+ { gk104_clkgate_blcg_init_gpc_ctxctl_0 },
+ { gk104_clkgate_blcg_init_gpc_unk_0 },
+ { gk104_clkgate_blcg_init_gpc_esetup_0 },
+ { gk104_clkgate_blcg_init_gpc_tpbus_0 },
+ { gk104_clkgate_blcg_init_gpc_zcull_0 },
+ { gk104_clkgate_blcg_init_gpc_tpconf_0 },
+ { gk104_clkgate_blcg_init_gpc_unk_1 },
+ { gk104_clkgate_blcg_init_gpc_gcc_0 },
+ { gk104_clkgate_blcg_init_gpc_ffb_0 },
+ { gk104_clkgate_blcg_init_gpc_tex_0 },
+ { gk104_clkgate_blcg_init_gpc_poly_0 },
+ { gk104_clkgate_blcg_init_gpc_l1c_0 },
+ { gk104_clkgate_blcg_init_gpc_unk_2 },
+ { gk104_clkgate_blcg_init_gpc_mp_0 },
+ { gk104_clkgate_blcg_init_gpc_ppc_0 },
+ { gk104_clkgate_blcg_init_rop_zrop_0 },
+ { gk104_clkgate_blcg_init_rop_0 },
+ { gk104_clkgate_blcg_init_rop_crop_0 },
+ { gk104_clkgate_blcg_init_pxbar_0 },
+ {}
+};
+
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
@@ -214,6 +417,9 @@ gk104_gr_init(struct gf100_gr *gr)
gr->func->init_gpc_mmu(gr);
gf100_gr_mmio(gr, gr->func->mmio);
+ if (gr->func->clkgate_pack)
+ nvkm_therm_clkgate_init(gr->base.engine.subdev.device->therm,
+ gr->func->clkgate_pack);
nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
@@ -338,6 +544,7 @@ gk104_gr = {
.rops = gf100_gr_rops,
.ppc_nr = 1,
.grctx = &gk104_grctx,
+ .clkgate_pack = gk104_clkgate_pack,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
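
gk104_clkgate_pack[] is an empty-terminated list of { addr, count, data } tables that gk104_gr_init() now hands to nvkm_therm_clkgate_init(). The walker itself lives in the therm subdev and is not part of this diff; the sketch below shows one plausible way such a pack could be applied, and the 4-byte register stride is an assumption:

#include <stdint.h>
#include <stdio.h>

struct clkgate_init {                /* models nvkm_therm_clkgate_init */
	uint32_t addr;
	uint8_t  count;
	uint32_t data;
};

struct clkgate_pack {                /* models nvkm_therm_clkgate_pack */
	const struct clkgate_init *init;
};

static void wr32(uint32_t addr, uint32_t data)
{
	/* stand-in for an MMIO write such as nvkm_wr32() */
	printf("wr32(0x%06x, 0x%08x)\n", addr, data);
}

static void clkgate_apply(const struct clkgate_pack *pack)
{
	const struct clkgate_init *init;
	unsigned int i;

	for (; pack->init; pack++) {
		for (init = pack->init; init->count; init++) {
			/* assumed stride between gated registers */
			for (i = 0; i < init->count; i++)
				wr32(init->addr + i * 4, init->data);
		}
	}
}

int main(void)
{
	static const struct clkgate_init main_0[] = {
		{ 0x4041f0, 1, 0x00004046 },
		{ 0x409890, 1, 0x00000045 },
		{}
	};
	static const struct clkgate_pack pack[] = {
		{ main_0 },
		{}
	};

	clkgate_apply(pack);
	return 0;
}
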
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.h
new file mode 100644
index 000000000000..a24c177365d1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk104.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul <lyude@redhat.com>
+ */
+#ifndef __GK104_GR_H__
+#define __GK104_GR_H__
+
+#include <subdev/therm.h>
+
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_main_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rstr2d_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_unk_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gcc_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_sked_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_unk_1[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ctxctl_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_esetup_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tpbus_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_zcull_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tpconf_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_1[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_gcc_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ffb_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_tex_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_poly_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_l1c_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_unk_2[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_mp_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_gpc_ppc_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_zrop_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_rop_crop_0[];
+extern const struct nvkm_therm_clkgate_init gk104_clkgate_blcg_init_pxbar_0[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
index a38e19b61c1d..4da916a9fc73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk110.c
@@ -22,6 +22,7 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "gf100.h"
+#include "gk104.h"
#include "ctxgf100.h"
#include <subdev/timer.h>
@@ -156,6 +157,159 @@ gk110_gr_pack_mmio[] = {
{}
};
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_blcg_init_sked_0[] = {
+ { 0x407000, 1, 0x00004041 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_blcg_init_gpc_gcc_0[] = {
+ { 0x419020, 1, 0x00000042 },
+ { 0x419038, 1, 0x00000042 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_blcg_init_gpc_l1c_0[] = {
+ { 0x419cd4, 2, 0x00004042 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_blcg_init_gpc_mp_0[] = {
+ { 0x419fd0, 1, 0x00004043 },
+ { 0x419fd8, 1, 0x00004049 },
+ { 0x419fe0, 2, 0x00004042 },
+ { 0x419ff0, 1, 0x00000046 },
+ { 0x419ff8, 1, 0x00004042 },
+ { 0x419f90, 1, 0x00004042 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_main_0[] = {
+ { 0x4041f4, 1, 0x00000000 },
+ { 0x409894, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_unk_0[] = {
+ { 0x406004, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_sked_0[] = {
+ { 0x407004, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_ctxctl_0[] = {
+ { 0x41a894, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_unk_0[] = {
+ { 0x418504, 1, 0x00000000 },
+ { 0x41860c, 1, 0x00000000 },
+ { 0x41868c, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_esetup_0[] = {
+ { 0x41882c, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_zcull_0[] = {
+ { 0x418974, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_l1c_0[] = {
+ { 0x419cd8, 2, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_unk_1[] = {
+ { 0x419c74, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_mp_0[] = {
+ { 0x419fd4, 1, 0x00004a4a },
+ { 0x419fdc, 1, 0x00000014 },
+ { 0x419fe4, 1, 0x00000000 },
+ { 0x419ff4, 1, 0x00001724 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_gpc_ppc_0[] = {
+ { 0x41be2c, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_init
+gk110_clkgate_slcg_init_pcounter_0[] = {
+ { 0x1be018, 1, 0x000001ff },
+ { 0x1bc018, 1, 0x000001ff },
+ { 0x1b8018, 1, 0x000001ff },
+ { 0x1b4124, 1, 0x00000000 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_pack
+gk110_clkgate_pack[] = {
+ { gk104_clkgate_blcg_init_main_0 },
+ { gk104_clkgate_blcg_init_rstr2d_0 },
+ { gk104_clkgate_blcg_init_unk_0 },
+ { gk104_clkgate_blcg_init_gcc_0 },
+ { gk110_clkgate_blcg_init_sked_0 },
+ { gk104_clkgate_blcg_init_unk_1 },
+ { gk104_clkgate_blcg_init_gpc_ctxctl_0 },
+ { gk104_clkgate_blcg_init_gpc_unk_0 },
+ { gk104_clkgate_blcg_init_gpc_esetup_0 },
+ { gk104_clkgate_blcg_init_gpc_tpbus_0 },
+ { gk104_clkgate_blcg_init_gpc_zcull_0 },
+ { gk104_clkgate_blcg_init_gpc_tpconf_0 },
+ { gk104_clkgate_blcg_init_gpc_unk_1 },
+ { gk110_clkgate_blcg_init_gpc_gcc_0 },
+ { gk104_clkgate_blcg_init_gpc_ffb_0 },
+ { gk104_clkgate_blcg_init_gpc_tex_0 },
+ { gk104_clkgate_blcg_init_gpc_poly_0 },
+ { gk110_clkgate_blcg_init_gpc_l1c_0 },
+ { gk104_clkgate_blcg_init_gpc_unk_2 },
+ { gk110_clkgate_blcg_init_gpc_mp_0 },
+ { gk104_clkgate_blcg_init_gpc_ppc_0 },
+ { gk104_clkgate_blcg_init_rop_zrop_0 },
+ { gk104_clkgate_blcg_init_rop_0 },
+ { gk104_clkgate_blcg_init_rop_crop_0 },
+ { gk104_clkgate_blcg_init_pxbar_0 },
+ { gk110_clkgate_slcg_init_main_0 },
+ { gk110_clkgate_slcg_init_unk_0 },
+ { gk110_clkgate_slcg_init_sked_0 },
+ { gk110_clkgate_slcg_init_gpc_ctxctl_0 },
+ { gk110_clkgate_slcg_init_gpc_unk_0 },
+ { gk110_clkgate_slcg_init_gpc_esetup_0 },
+ { gk110_clkgate_slcg_init_gpc_zcull_0 },
+ { gk110_clkgate_slcg_init_gpc_l1c_0 },
+ { gk110_clkgate_slcg_init_gpc_unk_1 },
+ { gk110_clkgate_slcg_init_gpc_mp_0 },
+ { gk110_clkgate_slcg_init_gpc_ppc_0 },
+ { gk110_clkgate_slcg_init_pcounter_0 },
+ {}
+};
+
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
@@ -192,6 +346,7 @@ gk110_gr = {
.rops = gf100_gr_rops,
.ppc_nr = 2,
.grctx = &gk110_grctx,
+ .clkgate_pack = gk110_clkgate_pack,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
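
gk110's pack mixes its own static tables with entries reused verbatim from gk104; the extern declarations in the new gk104.h are what make that cross-file reuse possible. A condensed single-listing sketch of the pattern (the file markers are comments and the names are illustrative):

#include <stdio.h>

/* --- shared header, cf. gk104.h ----------------------------------- */
struct reg_init { unsigned addr, count, data; };
extern const struct reg_init gk104_main[];

/* --- older chip's file, cf. gk104.c -------------------------------- */
const struct reg_init gk104_main[] = {
	{ 0x4041f0, 1, 0x00004046 },
	{ 0 }
};

/* --- newer chip's file, cf. gk110.c -------------------------------- */
static const struct reg_init *const gk110_pack[] = {
	gk104_main,          /* table reused as-is from the older chip */
	/* gk110-specific tables would be listed here */
	NULL
};

int main(void)
{
	printf("first shared reg: 0x%06x\n", gk110_pack[0][0].addr);
	return 0;
}
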
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index dde89a4a0f5b..53859b6254d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -462,7 +462,7 @@ nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
args->v0.id = di;
args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
- strncpy(args->v0.name, dom->name, sizeof(args->v0.name));
+ strncpy(args->v0.name, dom->name, sizeof(args->v0.name) - 1);
/* Currently only global counters (PCOUNTER) are implemented
* but this will be different for local counters (MP). */
@@ -514,7 +514,7 @@ nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
"/%s/%02x", dom->name, si);
} else {
strncpy(args->v0.name, sig->name,
- sizeof(args->v0.name));
+ sizeof(args->v0.name) - 1);
}
args->v0.signal = si;
@@ -572,7 +572,7 @@ nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
args->v0.source = sig->source[si];
args->v0.mask = src->mask;
- strncpy(args->v0.name, src->name, sizeof(args->v0.name));
+ strncpy(args->v0.name, src->name, sizeof(args->v0.name) - 1);
}
if (++si < source_nr) {
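
All three pm/base.c hunks reserve the final byte because strncpy() leaves the destination unterminated whenever the source is at least as long as the size argument. A stand-alone demonstration of the difference (the buffer contents are illustrative, and the memset() stands in for whatever zeroes the destination before the copy):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[8] = { 0 };

	/* Old pattern: an 8-char source fills all 8 bytes, so no
	 * terminating '\0' is written at all. */
	strncpy(name, "PCOUNTER", sizeof(name));

	memset(name, 0, sizeof(name));

	/* Fixed pattern: copy at most size - 1 bytes, so the last
	 * (still zero) byte always terminates the string. */
	strncpy(name, "PCOUNTER", sizeof(name) - 1);
	printf("%s\n", name);    /* prints the truncated "PCOUNTE" */
	return 0;
}
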
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index 77273b53672c..58a59b7db2e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -505,6 +505,7 @@ nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
ret = msgqueue_0137bca5_new(falcon, sb, queue);
break;
case 0x0148cdec:
+ case 0x015ccf3e:
ret = msgqueue_0148cdec_new(falcon, sb, queue);
break;
default:
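
The msgqueue.c hunk routes the new firmware version id onto the existing 0x0148cdec constructor with a case fall-through; a generic sketch of the idiom:

#include <stdio.h>

static int handler_0148cdec(void)
{
	puts("0x0148cdec-family handler");
	return 0;
}

static int dispatch(unsigned int version)
{
	switch (version) {
	case 0x0148cdec:
	case 0x015ccf3e:         /* new id, handled by the same code */
		return handler_0148cdec();
	default:
		return -1;       /* unknown firmware version */
	}
}

int main(void)
{
	return dispatch(0x015ccf3e);
}
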
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 96e0941c8edd..f0a26881d9b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -110,6 +110,7 @@ read_pll(struct gt215_clk *clk, int idx, u32 pll)
struct nvkm_device *device = clk->base.subdev.device;
u32 ctrl = nvkm_rd32(device, pll + 0);
u32 sclk = 0, P = 1, N = 1, M = 1;
+ u32 MP;
if (!(ctrl & 0x00000008)) {
if (ctrl & 0x00000001) {
@@ -130,10 +131,12 @@ read_pll(struct gt215_clk *clk, int idx, u32 pll)
sclk = read_clk(clk, 0x10 + idx, false);
}
- if (M * P)
- return sclk * N / (M * P);
+ MP = M * P;
- return 0;
+ if (!MP)
+ return 0;
+
+ return sclk * N / MP;
}
static int
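
The read_pll() change computes M * P once and tests the product before dividing, instead of dividing inside a truth test of the same expression. The same shape as a stand-alone function (the input values below are arbitrary):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t pll_rate(uint32_t sclk, uint32_t N, uint32_t M, uint32_t P)
{
	uint32_t MP = M * P;

	if (!MP)                 /* bail out rather than divide by zero */
		return 0;

	return sclk * N / MP;
}

int main(void)
{
	printf("%" PRIu32 "\n", pll_rate(27000, 74, 1, 2));  /* 999000 */
	printf("%" PRIu32 "\n", pll_rate(27000, 74, 0, 2));  /* 0, guarded */
	return 0;
}
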
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 2571530e82f1..b4f22cce5d43 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -22,6 +22,7 @@ nvkm-y += nvkm/subdev/fb/mcp89.o
nvkm-y += nvkm/subdev/fb/gf100.o
nvkm-y += nvkm/subdev/fb/gf108.o
nvkm-y += nvkm/subdev/fb/gk104.o
+nvkm-y += nvkm/subdev/fb/gk110.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
nvkm-y += nvkm/subdev/fb/gm200.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 47d28c279707..cdc4e0a2cc6b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -26,6 +26,7 @@
#include <core/memory.h>
#include <core/option.h>
+#include <subdev/therm.h>
void
gf100_fb_intr(struct nvkm_fb *base)
@@ -92,6 +93,11 @@ gf100_fb_init(struct nvkm_fb *base)
if (fb->r100c10_page)
nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
+
+ if (base->func->clkgate_pack) {
+ nvkm_therm_clkgate_init(device->therm,
+ base->func->clkgate_pack);
+ }
}
void *
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index 0a6e8eaad42c..48fd98e08baa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -20,10 +20,56 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
+ * Lyude Paul
*/
+#include "gk104.h"
#include "gf100.h"
#include "ram.h"
+/*
+ *******************************************************************************
+ *******************************************************************************
+ */
+const struct nvkm_therm_clkgate_init
+gk104_fb_clkgate_blcg_init_unk_0[] = {
+ { 0x100d10, 1, 0x0000c244 },
+ { 0x100d30, 1, 0x0000c242 },
+ { 0x100d3c, 1, 0x00000242 },
+ { 0x100d48, 1, 0x00000242 },
+ { 0x100d1c, 1, 0x00000042 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_fb_clkgate_blcg_init_vm_0[] = {
+ { 0x100c98, 1, 0x00000242 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_fb_clkgate_blcg_init_main_0[] = {
+ { 0x10f000, 1, 0x00000042 },
+ { 0x17e030, 1, 0x00000044 },
+ { 0x17e040, 1, 0x00000044 },
+ {}
+};
+
+const struct nvkm_therm_clkgate_init
+gk104_fb_clkgate_blcg_init_bcast_0[] = {
+ { 0x17ea60, 4, 0x00000044 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_pack
+gk104_fb_clkgate_pack[] = {
+ { gk104_fb_clkgate_blcg_init_unk_0 },
+ { gk104_fb_clkgate_blcg_init_vm_0 },
+ { gk104_fb_clkgate_blcg_init_main_0 },
+ { gk104_fb_clkgate_blcg_init_bcast_0 },
+ {}
+};
+
static const struct nvkm_fb_func
gk104_fb = {
.dtor = gf100_fb_dtor,
@@ -33,6 +79,7 @@ gk104_fb = {
.intr = gf100_fb_intr,
.ram_new = gk104_ram_new,
.default_bigpage = 17,
+ .clkgate_pack = gk104_fb_clkgate_pack,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.h
new file mode 100644
index 000000000000..b3c78e4ff706
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul
+ */
+
+#ifndef __GK104_FB_H__
+#define __GK104_FB_H__
+
+#include <subdev/therm.h>
+
+extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_unk_0[];
+extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_vm_0[];
+extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_main_0[];
+extern const struct nvkm_therm_clkgate_init gk104_fb_clkgate_blcg_init_bcast_0[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
new file mode 100644
index 000000000000..0695e5dd360e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk110.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul
+ */
+#include "gf100.h"
+#include "gk104.h"
+#include "ram.h"
+#include <subdev/therm.h>
+#include <subdev/fb.h>
+
+/*
+ *******************************************************************************
+ * PFB registers for clockgating
+ *******************************************************************************
+ */
+
+static const struct nvkm_therm_clkgate_init
+gk110_fb_clkgate_blcg_init_unk_0[] = {
+ { 0x100d10, 1, 0x0000c242 },
+ { 0x100d30, 1, 0x0000c242 },
+ { 0x100d3c, 1, 0x00000242 },
+ { 0x100d48, 1, 0x0000c242 },
+ { 0x100d1c, 1, 0x00000042 },
+ {}
+};
+
+static const struct nvkm_therm_clkgate_pack
+gk110_fb_clkgate_pack[] = {
+ { gk110_fb_clkgate_blcg_init_unk_0 },
+ { gk104_fb_clkgate_blcg_init_vm_0 },
+ { gk104_fb_clkgate_blcg_init_main_0 },
+ { gk104_fb_clkgate_blcg_init_bcast_0 },
+ {}
+};
+
+static const struct nvkm_fb_func
+gk110_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gf100_fb_init,
+ .init_page = gf100_fb_init_page,
+ .intr = gf100_fb_intr,
+ .ram_new = gk104_ram_new,
+ .default_bigpage = 17,
+ .clkgate_pack = gk110_fb_clkgate_pack,
+};
+
+int
+gk110_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gk110_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 9351188d5d76..414a423e0e55 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -3,6 +3,7 @@
#define __NVKM_FB_PRIV_H__
#define nvkm_fb(p) container_of((p), struct nvkm_fb, subdev)
#include <subdev/fb.h>
+#include <subdev/therm.h>
struct nvkm_bios;
struct nvkm_fb_func {
@@ -27,6 +28,7 @@ struct nvkm_fb_func {
int (*ram_new)(struct nvkm_fb *, struct nvkm_ram **);
u8 default_bigpage;
+ const struct nvkm_therm_clkgate_pack *clkgate_pack;
};
void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index fa81d0c1ba41..37b201b95f15 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -106,7 +106,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
} else
return ret;
- if (IS_ERR((memory = nvkm_umem_search(client, handle)))) {
+ memory = nvkm_umem_search(client, handle);
+ if (IS_ERR(memory)) {
VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory));
return PTR_ERR(memory);
}
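
The uvmm.c hunk splits an assignment buried inside if (IS_ERR(...)) into two statements, the usual kernel style for error-pointer checks. A user-space model with simplified stand-ins for the kernel's ERR_PTR machinery:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
	/* the top 4095 addresses encode negative errnos */
	return (uintptr_t)p >= (uintptr_t)-4095;
}

static void *umem_search(uint64_t handle)
{
	return handle ? "memory object" : ERR_PTR(-ENOENT);
}

int main(void)
{
	void *memory;

	memory = umem_search(0);   /* the assignment on its own line... */
	if (IS_ERR(memory)) {      /* ...then the readable error check  */
		printf("lookup failed: %ld\n", PTR_ERR(memory));
		return 1;
	}
	return 0;
}
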
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index e35d3e17cd7c..93946dcee319 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -642,7 +642,7 @@ nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
else
block = (size >> page[i].shift) << page[i].shift;
} else {
- block = (size >> page[i].shift) << page[i].shift;;
+ block = (size >> page[i].shift) << page[i].shift;
}
/* Perform operation. */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
index 53d01fb00a8b..1dbe593e5960 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
@@ -47,8 +47,8 @@ static uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x00000756,
- 0x00000748,
+ 0x00000754,
+ 0x00000746,
0x00000000,
0x00000000,
0x00000000,
@@ -69,8 +69,8 @@ static uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x0000075a,
0x00000758,
+ 0x00000756,
0x00000000,
0x00000000,
0x00000000,
@@ -91,8 +91,8 @@ static uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000b8a,
- 0x00000a2d,
+ 0x00000b88,
+ 0x00000a2b,
0x00000000,
0x00000000,
0x00000000,
@@ -113,8 +113,8 @@ static uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000bb3,
- 0x00000b8c,
+ 0x00000bb1,
+ 0x00000b8a,
0x00000000,
0x00000000,
0x00000000,
@@ -135,8 +135,8 @@ static uint32_t gf100_pmu_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000bbf,
0x00000bbd,
+ 0x00000bbb,
0x00000000,
0x00000000,
0x00000000,
@@ -237,19 +237,19 @@ static uint32_t gf100_pmu_data[] = {
0x000005d3,
0x00000003,
0x00000002,
- 0x0000069d,
+ 0x0000069b,
0x00040004,
0x00000000,
- 0x000006b9,
+ 0x000006b7,
0x00010005,
0x00000000,
- 0x000006d6,
+ 0x000006d4,
0x00010006,
0x00000000,
0x0000065b,
0x00000007,
0x00000000,
- 0x000006e1,
+ 0x000006df,
/* 0x03c4: memx_func_tail */
/* 0x03c4: memx_ts_start */
0x00000000,
@@ -1373,432 +1373,432 @@ static uint32_t gf100_pmu_code[] = {
/* 0x065b: memx_func_wait_vblank */
0x9800f840,
0x66b00016,
- 0x130bf400,
+ 0x120bf400,
0xf40166b0,
0x0ef4060b,
/* 0x066d: memx_func_wait_vblank_head1 */
- 0x2077f12e,
- 0x070ef400,
-/* 0x0674: memx_func_wait_vblank_head0 */
- 0x000877f1,
-/* 0x0678: memx_func_wait_vblank_0 */
- 0x07c467f1,
- 0xcf0664b6,
- 0x67fd0066,
- 0xf31bf404,
-/* 0x0688: memx_func_wait_vblank_1 */
- 0x07c467f1,
- 0xcf0664b6,
- 0x67fd0066,
- 0xf30bf404,
-/* 0x0698: memx_func_wait_vblank_fini */
- 0xf80410b6,
-/* 0x069d: memx_func_wr32 */
- 0x00169800,
- 0xb6011598,
- 0x60f90810,
- 0xd0fc50f9,
- 0x21f4e0fc,
- 0x0242b640,
- 0xf8e91bf4,
-/* 0x06b9: memx_func_wait */
- 0x2c87f000,
- 0xcf0684b6,
- 0x1e980088,
- 0x011d9800,
- 0x98021c98,
- 0x10b6031b,
- 0xa321f410,
-/* 0x06d6: memx_func_delay */
- 0x1e9800f8,
- 0x0410b600,
- 0xf87e21f4,
-/* 0x06e1: memx_func_train */
-/* 0x06e3: memx_exec */
- 0xf900f800,
- 0xb9d0f9e0,
- 0xb2b902c1,
-/* 0x06ed: memx_exec_next */
- 0x00139802,
- 0xe70410b6,
- 0xe701f034,
- 0xb601e033,
- 0x30f00132,
- 0xde35980c,
- 0x12b855f9,
- 0xe41ef406,
- 0x98f10b98,
- 0xcbbbf20c,
- 0xc4b7f102,
- 0x06b4b607,
- 0xfc00bbcf,
- 0xf5e0fcd0,
- 0xf8033621,
-/* 0x0729: memx_info */
- 0x01c67000,
-/* 0x072f: memx_info_data */
- 0xf10e0bf4,
- 0xf103ccc7,
- 0xf40800b7,
-/* 0x073a: memx_info_train */
- 0xc7f10b0e,
- 0xb7f10bcc,
-/* 0x0742: memx_info_send */
- 0x21f50100,
- 0x00f80336,
-/* 0x0748: memx_recv */
- 0xf401d6b0,
- 0xd6b0980b,
- 0xd80bf400,
-/* 0x0756: memx_init */
- 0x00f800f8,
-/* 0x0758: perf_recv */
-/* 0x075a: perf_init */
+ 0x2077f02c,
+/* 0x0673: memx_func_wait_vblank_head0 */
+ 0xf0060ef4,
+/* 0x0676: memx_func_wait_vblank_0 */
+ 0x67f10877,
+ 0x64b607c4,
+ 0x0066cf06,
+ 0xf40467fd,
+/* 0x0686: memx_func_wait_vblank_1 */
+ 0x67f1f31b,
+ 0x64b607c4,
+ 0x0066cf06,
+ 0xf40467fd,
+/* 0x0696: memx_func_wait_vblank_fini */
+ 0x10b6f30b,
+/* 0x069b: memx_func_wr32 */
+ 0x9800f804,
+ 0x15980016,
+ 0x0810b601,
+ 0x50f960f9,
+ 0xe0fcd0fc,
+ 0xb64021f4,
+ 0x1bf40242,
+/* 0x06b7: memx_func_wait */
+ 0xf000f8e9,
+ 0x84b62c87,
+ 0x0088cf06,
+ 0x98001e98,
+ 0x1c98011d,
+ 0x031b9802,
+ 0xf41010b6,
+ 0x00f8a321,
+/* 0x06d4: memx_func_delay */
+ 0xb6001e98,
+ 0x21f40410,
+/* 0x06df: memx_func_train */
+ 0xf800f87e,
+/* 0x06e1: memx_exec */
+ 0xf9e0f900,
+ 0x02c1b9d0,
+/* 0x06eb: memx_exec_next */
+ 0x9802b2b9,
+ 0x10b60013,
+ 0xf034e704,
+ 0xe033e701,
+ 0x0132b601,
+ 0x980c30f0,
+ 0x55f9de35,
+ 0xf40612b8,
+ 0x0b98e41e,
+ 0xf20c98f1,
+ 0xf102cbbb,
+ 0xb607c4b7,
+ 0xbbcf06b4,
+ 0xfcd0fc00,
+ 0x3621f5e0,
+/* 0x0727: memx_info */
+ 0x7000f803,
+ 0x0bf401c6,
+/* 0x072d: memx_info_data */
+ 0xccc7f10e,
+ 0x00b7f103,
+ 0x0b0ef408,
+/* 0x0738: memx_info_train */
+ 0x0bccc7f1,
+ 0x0100b7f1,
+/* 0x0740: memx_info_send */
+ 0x033621f5,
+/* 0x0746: memx_recv */
+ 0xd6b000f8,
+ 0x980bf401,
+ 0xf400d6b0,
+ 0x00f8d80b,
+/* 0x0754: memx_init */
+/* 0x0756: perf_recv */
0x00f800f8,
-/* 0x075c: i2c_drive_scl */
- 0xf40036b0,
- 0x07f1110b,
- 0x04b607e0,
- 0x0001d006,
- 0x00f804bd,
-/* 0x0770: i2c_drive_scl_lo */
- 0x07e407f1,
- 0xd00604b6,
- 0x04bd0001,
-/* 0x077e: i2c_drive_sda */
+/* 0x0758: perf_init */
+/* 0x075a: i2c_drive_scl */
0x36b000f8,
0x110bf400,
0x07e007f1,
0xd00604b6,
- 0x04bd0002,
-/* 0x0792: i2c_drive_sda_lo */
+ 0x04bd0001,
+/* 0x076e: i2c_drive_scl_lo */
0x07f100f8,
0x04b607e4,
+ 0x0001d006,
+ 0x00f804bd,
+/* 0x077c: i2c_drive_sda */
+ 0xf40036b0,
+ 0x07f1110b,
+ 0x04b607e0,
0x0002d006,
0x00f804bd,
-/* 0x07a0: i2c_sense_scl */
- 0xf10132f4,
- 0xb607c437,
- 0x33cf0634,
- 0x0431fd00,
- 0xf4060bf4,
-/* 0x07b6: i2c_sense_scl_done */
- 0x00f80131,
-/* 0x07b8: i2c_sense_sda */
- 0xf10132f4,
- 0xb607c437,
- 0x33cf0634,
- 0x0432fd00,
- 0xf4060bf4,
-/* 0x07ce: i2c_sense_sda_done */
- 0x00f80131,
-/* 0x07d0: i2c_raise_scl */
- 0x47f140f9,
- 0x37f00898,
- 0x5c21f501,
-/* 0x07dd: i2c_raise_scl_wait */
- 0xe8e7f107,
- 0x7e21f403,
- 0x07a021f5,
- 0xb60901f4,
- 0x1bf40142,
-/* 0x07f1: i2c_raise_scl_done */
- 0xf840fcef,
-/* 0x07f5: i2c_start */
- 0xa021f500,
- 0x0d11f407,
- 0x07b821f5,
- 0xf40611f4,
-/* 0x0806: i2c_start_rep */
- 0x37f0300e,
- 0x5c21f500,
- 0x0137f007,
- 0x077e21f5,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xd021f550,
- 0x0464b607,
-/* 0x0833: i2c_start_send */
- 0xf01f11f4,
+/* 0x0790: i2c_drive_sda_lo */
+ 0x07e407f1,
+ 0xd00604b6,
+ 0x04bd0002,
+/* 0x079e: i2c_sense_scl */
+ 0x32f400f8,
+ 0xc437f101,
+ 0x0634b607,
+ 0xfd0033cf,
+ 0x0bf40431,
+ 0x0131f406,
+/* 0x07b4: i2c_sense_scl_done */
+/* 0x07b6: i2c_sense_sda */
+ 0x32f400f8,
+ 0xc437f101,
+ 0x0634b607,
+ 0xfd0033cf,
+ 0x0bf40432,
+ 0x0131f406,
+/* 0x07cc: i2c_sense_sda_done */
+/* 0x07ce: i2c_raise_scl */
+ 0x40f900f8,
+ 0x089847f1,
+ 0xf50137f0,
+/* 0x07db: i2c_raise_scl_wait */
+ 0xf1075a21,
+ 0xf403e8e7,
+ 0x21f57e21,
+ 0x01f4079e,
+ 0x0142b609,
+/* 0x07ef: i2c_raise_scl_done */
+ 0xfcef1bf4,
+/* 0x07f3: i2c_start */
+ 0xf500f840,
+ 0xf4079e21,
+ 0x21f50d11,
+ 0x11f407b6,
+ 0x300ef406,
+/* 0x0804: i2c_start_rep */
+ 0xf50037f0,
+ 0xf0075a21,
+ 0x21f50137,
+ 0x76bb077c,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb607ce21,
+ 0x11f40464,
+/* 0x0831: i2c_start_send */
+ 0x0037f01f,
+ 0x077c21f5,
+ 0x1388e7f1,
+ 0xf07e21f4,
0x21f50037,
- 0xe7f1077e,
+ 0xe7f1075a,
0x21f41388,
- 0x0037f07e,
- 0x075c21f5,
- 0x1388e7f1,
-/* 0x084f: i2c_start_out */
- 0xf87e21f4,
-/* 0x0851: i2c_stop */
- 0x0037f000,
- 0x075c21f5,
- 0xf50037f0,
- 0xf1077e21,
- 0xf403e8e7,
- 0x37f07e21,
- 0x5c21f501,
- 0x88e7f107,
- 0x7e21f413,
+/* 0x084d: i2c_start_out */
+/* 0x084f: i2c_stop */
+ 0xf000f87e,
+ 0x21f50037,
+ 0x37f0075a,
+ 0x7c21f500,
+ 0xe8e7f107,
+ 0x7e21f403,
0xf50137f0,
- 0xf1077e21,
+ 0xf1075a21,
0xf41388e7,
- 0x00f87e21,
-/* 0x0884: i2c_bitw */
- 0x077e21f5,
- 0x03e8e7f1,
- 0xbb7e21f4,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x07d021f5,
- 0xf40464b6,
- 0xe7f11811,
- 0x21f41388,
- 0x0037f07e,
- 0x075c21f5,
- 0x1388e7f1,
-/* 0x08c3: i2c_bitw_out */
- 0xf87e21f4,
-/* 0x08c5: i2c_bitr */
- 0x0137f000,
- 0x077e21f5,
- 0x03e8e7f1,
- 0xbb7e21f4,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x07d021f5,
- 0xf40464b6,
- 0x21f51b11,
- 0x37f007b8,
- 0x5c21f500,
+ 0x37f07e21,
+ 0x7c21f501,
0x88e7f107,
0x7e21f413,
- 0xf4013cf0,
-/* 0x090a: i2c_bitr_done */
- 0x00f80131,
-/* 0x090c: i2c_get_byte */
- 0xf00057f0,
-/* 0x0912: i2c_get_byte_next */
- 0x54b60847,
- 0x0076bb01,
+/* 0x0882: i2c_bitw */
+ 0x21f500f8,
+ 0xe7f1077c,
+ 0x21f403e8,
+ 0x0076bb7e,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b608c5,
- 0x2b11f404,
- 0xb60553fd,
- 0x1bf40142,
- 0x0137f0d8,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x8421f550,
- 0x0464b608,
-/* 0x095c: i2c_get_byte_done */
-/* 0x095e: i2c_put_byte */
- 0x47f000f8,
-/* 0x0961: i2c_put_byte_next */
- 0x0142b608,
- 0xbb3854ff,
+ 0x64b607ce,
+ 0x1811f404,
+ 0x1388e7f1,
+ 0xf07e21f4,
+ 0x21f50037,
+ 0xe7f1075a,
+ 0x21f41388,
+/* 0x08c1: i2c_bitw_out */
+/* 0x08c3: i2c_bitr */
+ 0xf000f87e,
+ 0x21f50137,
+ 0xe7f1077c,
+ 0x21f403e8,
+ 0x0076bb7e,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b607ce,
+ 0x1b11f404,
+ 0x07b621f5,
+ 0xf50037f0,
+ 0xf1075a21,
+ 0xf41388e7,
+ 0x3cf07e21,
+ 0x0131f401,
+/* 0x0908: i2c_bitr_done */
+/* 0x090a: i2c_get_byte */
+ 0x57f000f8,
+ 0x0847f000,
+/* 0x0910: i2c_get_byte_next */
+ 0xbb0154b6,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x088421f5,
+ 0x08c321f5,
0xf40464b6,
- 0x46b03411,
- 0xd81bf400,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0xc521f550,
- 0x0464b608,
- 0xbb0f11f4,
- 0x36b00076,
- 0x061bf401,
-/* 0x09b7: i2c_put_byte_done */
- 0xf80132f4,
-/* 0x09b9: i2c_addr */
- 0x0076bb00,
+ 0x53fd2b11,
+ 0x0142b605,
+ 0xf0d81bf4,
+ 0x76bb0137,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb6088221,
+/* 0x095a: i2c_get_byte_done */
+ 0x00f80464,
+/* 0x095c: i2c_put_byte */
+/* 0x095f: i2c_put_byte_next */
+ 0xb60847f0,
+ 0x54ff0142,
+ 0x0076bb38,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b607f5,
- 0x2911f404,
- 0x012ec3e7,
- 0xfd0134b6,
- 0x76bb0553,
+ 0x64b60882,
+ 0x3411f404,
+ 0xf40046b0,
+ 0x76bbd81b,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb6095e21,
-/* 0x09fe: i2c_addr_done */
- 0x00f80464,
-/* 0x0a00: i2c_acquire_addr */
- 0xb6f8cec7,
- 0xe0b702e4,
- 0xee980d1c,
-/* 0x0a0f: i2c_acquire */
- 0xf500f800,
- 0xf40a0021,
- 0xd9f00421,
- 0x4021f403,
-/* 0x0a1e: i2c_release */
- 0x21f500f8,
- 0x21f40a00,
- 0x03daf004,
- 0xf84021f4,
-/* 0x0a2d: i2c_recv */
- 0x0132f400,
- 0xb6f8c1c7,
- 0x16b00214,
- 0x3a1ff528,
- 0xf413a001,
- 0x0032980c,
- 0x0ccc13a0,
- 0xf4003198,
- 0xd0f90231,
- 0xd0f9e0f9,
- 0x000067f1,
- 0x100063f1,
- 0xbb016792,
+ 0xb608c321,
+ 0x11f40464,
+ 0x0076bb0f,
+ 0xf40136b0,
+ 0x32f4061b,
+/* 0x09b5: i2c_put_byte_done */
+/* 0x09b7: i2c_addr */
+ 0xbb00f801,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0a0f21f5,
- 0xfc0464b6,
- 0x00d6b0d0,
- 0x00b31bf5,
- 0xbb0057f0,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x09b921f5,
- 0xf50464b6,
- 0xc700d011,
- 0x76bbe0c5,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0xf550fc04,
- 0xb6095e21,
- 0x11f50464,
- 0x57f000ad,
+ 0x07f321f5,
+ 0xf40464b6,
+ 0xc3e72911,
+ 0x34b6012e,
+ 0x0553fd01,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x5c21f550,
+ 0x0464b609,
+/* 0x09fc: i2c_addr_done */
+/* 0x09fe: i2c_acquire_addr */
+ 0xcec700f8,
+ 0x02e4b6f8,
+ 0x0d1ce0b7,
+ 0xf800ee98,
+/* 0x0a0d: i2c_acquire */
+ 0xfe21f500,
+ 0x0421f409,
+ 0xf403d9f0,
+ 0x00f84021,
+/* 0x0a1c: i2c_release */
+ 0x09fe21f5,
+ 0xf00421f4,
+ 0x21f403da,
+/* 0x0a2b: i2c_recv */
+ 0xf400f840,
+ 0xc1c70132,
+ 0x0214b6f8,
+ 0xf52816b0,
+ 0xa0013a1f,
+ 0x980cf413,
+ 0x13a00032,
+ 0x31980ccc,
+ 0x0231f400,
+ 0xe0f9d0f9,
+ 0x67f1d0f9,
+ 0x63f10000,
+ 0x67921000,
0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b609b9,
- 0x8a11f504,
+ 0x64b60a0d,
+ 0xb0d0fc04,
+ 0x1bf500d6,
+ 0x57f000b3,
0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b6090c,
- 0x6a11f404,
- 0xbbe05bcb,
+ 0x64b609b7,
+ 0xd011f504,
+ 0xe0c5c700,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x5c21f550,
+ 0x0464b609,
+ 0x00ad11f5,
+ 0xbb0157f0,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x085121f5,
- 0xb90464b6,
- 0x74bd025b,
-/* 0x0b33: i2c_recv_not_rd08 */
- 0xb0430ef4,
- 0x1bf401d6,
- 0x0057f03d,
- 0x09b921f5,
- 0xc73311f4,
- 0x21f5e0c5,
- 0x11f4095e,
- 0x0057f029,
- 0x09b921f5,
- 0xc71f11f4,
- 0x21f5e0b5,
- 0x11f4095e,
- 0x5121f515,
- 0xc774bd08,
- 0x1bf408c5,
- 0x0232f409,
-/* 0x0b73: i2c_recv_not_wr08 */
-/* 0x0b73: i2c_recv_done */
- 0xc7030ef4,
- 0x21f5f8ce,
- 0xe0fc0a1e,
- 0x12f4d0fc,
- 0x027cb90a,
- 0x033621f5,
-/* 0x0b88: i2c_recv_exit */
-/* 0x0b8a: i2c_init */
- 0x00f800f8,
-/* 0x0b8c: test_recv */
- 0x05d817f1,
+ 0x09b721f5,
+ 0xf50464b6,
+ 0xbb008a11,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x090a21f5,
+ 0xf40464b6,
+ 0x5bcb6a11,
+ 0x0076bbe0,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b6084f,
+ 0x025bb904,
+ 0x0ef474bd,
+/* 0x0b31: i2c_recv_not_rd08 */
+ 0x01d6b043,
+ 0xf03d1bf4,
+ 0x21f50057,
+ 0x11f409b7,
+ 0xe0c5c733,
+ 0x095c21f5,
+ 0xf02911f4,
+ 0x21f50057,
+ 0x11f409b7,
+ 0xe0b5c71f,
+ 0x095c21f5,
+ 0xf51511f4,
+ 0xbd084f21,
+ 0x08c5c774,
+ 0xf4091bf4,
+ 0x0ef40232,
+/* 0x0b71: i2c_recv_not_wr08 */
+/* 0x0b71: i2c_recv_done */
+ 0xf8cec703,
+ 0x0a1c21f5,
+ 0xd0fce0fc,
+ 0xb90a12f4,
+ 0x21f5027c,
+/* 0x0b86: i2c_recv_exit */
+ 0x00f80336,
+/* 0x0b88: i2c_init */
+/* 0x0b8a: test_recv */
+ 0x17f100f8,
+ 0x14b605d8,
+ 0x0011cf06,
+ 0xf10110b6,
+ 0xb605d807,
+ 0x01d00604,
+ 0xf104bd00,
+ 0xf1d900e7,
+ 0xf5134fe3,
+ 0xf8025621,
+/* 0x0bb1: test_init */
+ 0x00e7f100,
+ 0x5621f508,
+/* 0x0bbb: idle_recv */
+ 0xf800f802,
+/* 0x0bbd: idle */
+ 0x0031f400,
+ 0x05d417f1,
0xcf0614b6,
0x10b60011,
- 0xd807f101,
+ 0xd407f101,
0x0604b605,
0xbd0001d0,
- 0x00e7f104,
- 0x4fe3f1d9,
- 0x5621f513,
-/* 0x0bb3: test_init */
- 0xf100f802,
- 0xf50800e7,
- 0xf8025621,
-/* 0x0bbd: idle_recv */
-/* 0x0bbf: idle */
- 0xf400f800,
- 0x17f10031,
- 0x14b605d4,
- 0x0011cf06,
- 0xf10110b6,
- 0xb605d407,
- 0x01d00604,
-/* 0x0bdb: idle_loop */
- 0xf004bd00,
- 0x32f45817,
-/* 0x0be1: idle_proc */
-/* 0x0be1: idle_proc_exec */
- 0xb910f902,
- 0x21f5021e,
- 0x10fc033f,
- 0xf40911f4,
- 0x0ef40231,
-/* 0x0bf5: idle_proc_next */
- 0x5810b6ef,
- 0xf4061fb8,
- 0x02f4e61b,
- 0x0028f4dd,
- 0x00bb0ef4,
+/* 0x0bd9: idle_loop */
+ 0x5817f004,
+/* 0x0bdf: idle_proc */
+/* 0x0bdf: idle_proc_exec */
+ 0xf90232f4,
+ 0x021eb910,
+ 0x033f21f5,
+ 0x11f410fc,
+ 0x0231f409,
+/* 0x0bf3: idle_proc_next */
+ 0xb6ef0ef4,
+ 0x1fb85810,
+ 0xe61bf406,
+ 0xf4dd02f4,
+ 0x0ef40028,
+ 0x000000bb,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
index c4edbc79e41a..e0222cb832fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
@@ -47,8 +47,8 @@ static uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x000005f3,
- 0x000005e5,
+ 0x000005ee,
+ 0x000005e0,
0x00000000,
0x00000000,
0x00000000,
@@ -69,8 +69,8 @@ static uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x000005f7,
- 0x000005f5,
+ 0x000005f2,
+ 0x000005f0,
0x00000000,
0x00000000,
0x00000000,
@@ -91,8 +91,8 @@ static uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x000009f8,
- 0x000008a2,
+ 0x000009f3,
+ 0x0000089d,
0x00000000,
0x00000000,
0x00000000,
@@ -113,8 +113,8 @@ static uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000a16,
- 0x000009fa,
+ 0x00000a11,
+ 0x000009f5,
0x00000000,
0x00000000,
0x00000000,
@@ -135,8 +135,8 @@ static uint32_t gk208_pmu_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000a21,
- 0x00000a1f,
+ 0x00000a1c,
+ 0x00000a1a,
0x00000000,
0x00000000,
0x00000000,
@@ -234,22 +234,22 @@ static uint32_t gk208_pmu_data[] = {
/* 0x037c: memx_func_next */
0x00000002,
0x00000000,
- 0x000004cf,
+ 0x000004cc,
0x00000003,
0x00000002,
- 0x00000546,
+ 0x00000541,
0x00040004,
0x00000000,
- 0x00000563,
+ 0x0000055e,
0x00010005,
0x00000000,
- 0x0000057d,
+ 0x00000578,
0x00010006,
0x00000000,
- 0x00000541,
+ 0x0000053c,
0x00000007,
0x00000000,
- 0x00000589,
+ 0x00000584,
/* 0x03c4: memx_func_tail */
/* 0x03c4: memx_ts_start */
0x00000000,
@@ -1239,454 +1239,454 @@ static uint32_t gk208_pmu_code[] = {
0x0001f604,
0x00f804bd,
/* 0x045c: memx_func_enter */
- 0x162067f1,
- 0xf55d77f1,
- 0x047e6eb2,
- 0xd8b20000,
- 0xf90487fd,
- 0xfc80f960,
- 0x7ee0fcd0,
- 0x0700002d,
- 0x7e6eb2fe,
+ 0x47162046,
+ 0x6eb2f55d,
+ 0x0000047e,
+ 0x87fdd8b2,
+ 0xf960f904,
+ 0xfcd0fc80,
+ 0x002d7ee0,
+ 0xb2fe0700,
+ 0x00047e6e,
+ 0xfdd8b200,
+ 0x60f90487,
+ 0xd0fc80f9,
+ 0x2d7ee0fc,
+ 0xf0460000,
+ 0x7e6eb226,
0xb2000004,
0x0487fdd8,
0x80f960f9,
0xe0fcd0fc,
0x00002d7e,
- 0x26f067f1,
- 0x047e6eb2,
- 0xd8b20000,
- 0xf90487fd,
- 0xfc80f960,
- 0x7ee0fcd0,
- 0x0600002d,
- 0x07e04004,
- 0xbd0006f6,
-/* 0x04b9: memx_func_enter_wait */
- 0x07c04604,
- 0xf00066cf,
- 0x0bf40464,
- 0xcf2c06f7,
- 0x06b50066,
-/* 0x04cf: memx_func_leave */
- 0x0600f8f1,
- 0x0066cf2c,
- 0x06f206b5,
- 0x07e44004,
- 0xbd0006f6,
-/* 0x04e1: memx_func_leave_wait */
- 0x07c04604,
- 0xf00066cf,
- 0x1bf40464,
- 0xf067f1f7,
+ 0xe0400406,
+ 0x0006f607,
+/* 0x04b6: memx_func_enter_wait */
+ 0xc04604bd,
+ 0x0066cf07,
+ 0xf40464f0,
+ 0x2c06f70b,
+ 0xb50066cf,
+ 0x00f8f106,
+/* 0x04cc: memx_func_leave */
+ 0x66cf2c06,
+ 0xf206b500,
+ 0xe4400406,
+ 0x0006f607,
+/* 0x04de: memx_func_leave_wait */
+ 0xc04604bd,
+ 0x0066cf07,
+ 0xf40464f0,
+ 0xf046f71b,
0xb2010726,
0x00047e6e,
0xfdd8b200,
0x60f90587,
0xd0fc80f9,
0x2d7ee0fc,
- 0x67f10000,
- 0x6eb21620,
- 0x0000047e,
- 0x87fdd8b2,
- 0xf960f905,
- 0xfcd0fc80,
- 0x002d7ee0,
- 0x0aa24700,
- 0x047e6eb2,
- 0xd8b20000,
- 0xf90587fd,
- 0xfc80f960,
- 0x7ee0fcd0,
- 0xf800002d,
-/* 0x0541: memx_func_wait_vblank */
+ 0x20460000,
+ 0x7e6eb216,
+ 0xb2000004,
+ 0x0587fdd8,
+ 0x80f960f9,
+ 0xe0fcd0fc,
+ 0x00002d7e,
+ 0xb20aa247,
+ 0x00047e6e,
+ 0xfdd8b200,
+ 0x60f90587,
+ 0xd0fc80f9,
+ 0x2d7ee0fc,
+ 0x00f80000,
+/* 0x053c: memx_func_wait_vblank */
+ 0xf80410b6,
+/* 0x0541: memx_func_wr32 */
+ 0x00169800,
+ 0xb6011598,
+ 0x60f90810,
+ 0xd0fc50f9,
+ 0x2d7ee0fc,
+ 0x42b60000,
+ 0xe81bf402,
+/* 0x055e: memx_func_wait */
+ 0x2c0800f8,
+ 0x980088cf,
+ 0x1d98001e,
+ 0x021c9801,
+ 0xb6031b98,
+ 0x747e1010,
+ 0x00f80000,
+/* 0x0578: memx_func_delay */
+ 0xb6001e98,
+ 0x587e0410,
+ 0x00f80000,
+/* 0x0584: memx_func_train */
+/* 0x0586: memx_exec */
+ 0xe0f900f8,
+ 0xc1b2d0f9,
+/* 0x058e: memx_exec_next */
+ 0x1398b2b2,
0x0410b600,
-/* 0x0546: memx_func_wr32 */
- 0x169800f8,
- 0x01159800,
- 0xf90810b6,
- 0xfc50f960,
+ 0x01f034e7,
+ 0x01e033e7,
+ 0xf00132b6,
+ 0x35980c30,
+ 0xa655f9de,
+ 0xe51ef412,
+ 0x98f10b98,
+ 0xcbbbf20c,
+ 0x07c44b02,
+ 0xfc00bbcf,
0x7ee0fcd0,
- 0xb600002d,
- 0x1bf40242,
-/* 0x0563: memx_func_wait */
- 0x0800f8e8,
- 0x0088cf2c,
- 0x98001e98,
- 0x1c98011d,
- 0x031b9802,
- 0x7e1010b6,
- 0xf8000074,
-/* 0x057d: memx_func_delay */
- 0x001e9800,
- 0x7e0410b6,
- 0xf8000058,
-/* 0x0589: memx_func_train */
-/* 0x058b: memx_exec */
- 0xf900f800,
- 0xb2d0f9e0,
-/* 0x0593: memx_exec_next */
- 0x98b2b2c1,
- 0x10b60013,
- 0xf034e704,
- 0xe033e701,
- 0x0132b601,
- 0x980c30f0,
- 0x55f9de35,
- 0x1ef412a6,
- 0xf10b98e5,
- 0xbbf20c98,
- 0xc44b02cb,
- 0x00bbcf07,
- 0xe0fcd0fc,
- 0x00029f7e,
-/* 0x05ca: memx_info */
- 0xc67000f8,
- 0x0c0bf401,
-/* 0x05d0: memx_info_data */
- 0x4b03cc4c,
- 0x0ef40800,
-/* 0x05d9: memx_info_train */
- 0x0bcc4c09,
-/* 0x05df: memx_info_send */
- 0x7e01004b,
0xf800029f,
-/* 0x05e5: memx_recv */
- 0x01d6b000,
- 0xb0a30bf4,
- 0x0bf400d6,
-/* 0x05f3: memx_init */
- 0xf800f8dc,
-/* 0x05f5: perf_recv */
-/* 0x05f7: perf_init */
- 0xf800f800,
-/* 0x05f9: i2c_drive_scl */
- 0x0036b000,
- 0x400d0bf4,
- 0x01f607e0,
- 0xf804bd00,
-/* 0x0609: i2c_drive_scl_lo */
- 0x07e44000,
- 0xbd0001f6,
-/* 0x0613: i2c_drive_sda */
- 0xb000f804,
- 0x0bf40036,
- 0x07e0400d,
- 0xbd0002f6,
-/* 0x0623: i2c_drive_sda_lo */
- 0x4000f804,
- 0x02f607e4,
- 0xf804bd00,
-/* 0x062d: i2c_sense_scl */
- 0x0132f400,
- 0xcf07c443,
- 0x31fd0033,
- 0x060bf404,
-/* 0x063f: i2c_sense_scl_done */
- 0xf80131f4,
-/* 0x0641: i2c_sense_sda */
- 0x0132f400,
- 0xcf07c443,
- 0x32fd0033,
- 0x060bf404,
-/* 0x0653: i2c_sense_sda_done */
- 0xf80131f4,
-/* 0x0655: i2c_raise_scl */
- 0x4440f900,
- 0x01030898,
- 0x0005f97e,
-/* 0x0660: i2c_raise_scl_wait */
- 0x7e03e84e,
- 0x7e000058,
- 0xf400062d,
- 0x42b60901,
- 0xef1bf401,
-/* 0x0674: i2c_raise_scl_done */
- 0x00f840fc,
-/* 0x0678: i2c_start */
- 0x00062d7e,
- 0x7e0d11f4,
- 0xf4000641,
- 0x0ef40611,
-/* 0x0689: i2c_start_rep */
- 0x7e00032e,
- 0x030005f9,
- 0x06137e01,
+/* 0x05c5: memx_info */
+ 0x01c67000,
+/* 0x05cb: memx_info_data */
+ 0x4c0c0bf4,
+ 0x004b03cc,
+ 0x090ef408,
+/* 0x05d4: memx_info_train */
+ 0x4b0bcc4c,
+/* 0x05da: memx_info_send */
+ 0x9f7e0100,
+ 0x00f80002,
+/* 0x05e0: memx_recv */
+ 0xf401d6b0,
+ 0xd6b0a30b,
+ 0xdc0bf400,
+/* 0x05ee: memx_init */
+ 0x00f800f8,
+/* 0x05f0: perf_recv */
+/* 0x05f2: perf_init */
+ 0x00f800f8,
+/* 0x05f4: i2c_drive_scl */
+ 0xf40036b0,
+ 0xe0400d0b,
+ 0x0001f607,
+ 0x00f804bd,
+/* 0x0604: i2c_drive_scl_lo */
+ 0xf607e440,
+ 0x04bd0001,
+/* 0x060e: i2c_drive_sda */
+ 0x36b000f8,
+ 0x0d0bf400,
+ 0xf607e040,
+ 0x04bd0002,
+/* 0x061e: i2c_drive_sda_lo */
+ 0xe44000f8,
+ 0x0002f607,
+ 0x00f804bd,
+/* 0x0628: i2c_sense_scl */
+ 0x430132f4,
+ 0x33cf07c4,
+ 0x0431fd00,
+ 0xf4060bf4,
+/* 0x063a: i2c_sense_scl_done */
+ 0x00f80131,
+/* 0x063c: i2c_sense_sda */
+ 0x430132f4,
+ 0x33cf07c4,
+ 0x0432fd00,
+ 0xf4060bf4,
+/* 0x064e: i2c_sense_sda_done */
+ 0x00f80131,
+/* 0x0650: i2c_raise_scl */
+ 0x984440f9,
+ 0x7e010308,
+/* 0x065b: i2c_raise_scl_wait */
+ 0x4e0005f4,
+ 0x587e03e8,
+ 0x287e0000,
+ 0x01f40006,
+ 0x0142b609,
+/* 0x066f: i2c_raise_scl_done */
+ 0xfcef1bf4,
+/* 0x0673: i2c_start */
+ 0x7e00f840,
+ 0xf4000628,
+ 0x3c7e0d11,
+ 0x11f40006,
+ 0x2e0ef406,
+/* 0x0684: i2c_start_rep */
+ 0xf47e0003,
+ 0x01030005,
+ 0x00060e7e,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x06507e50,
+ 0x0464b600,
+/* 0x06af: i2c_start_send */
+ 0x031d11f4,
+ 0x060e7e00,
+ 0x13884e00,
+ 0x0000587e,
+ 0xf47e0003,
+ 0x884e0005,
+ 0x00587e13,
+/* 0x06c9: i2c_start_out */
+/* 0x06cb: i2c_stop */
+ 0x0300f800,
+ 0x05f47e00,
+ 0x7e000300,
+ 0x4e00060e,
+ 0x587e03e8,
+ 0x01030000,
+ 0x0005f47e,
+ 0x7e13884e,
+ 0x03000058,
+ 0x060e7e01,
+ 0x13884e00,
+ 0x0000587e,
+/* 0x06fa: i2c_bitw */
+ 0x0e7e00f8,
+ 0xe84e0006,
+ 0x00587e03,
0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
- 0x557e50fc,
+ 0x507e50fc,
0x64b60006,
- 0x1d11f404,
-/* 0x06b4: i2c_start_send */
- 0x137e0003,
- 0x884e0006,
- 0x00587e13,
- 0x7e000300,
- 0x4e0005f9,
- 0x587e1388,
-/* 0x06ce: i2c_start_out */
- 0x00f80000,
-/* 0x06d0: i2c_stop */
- 0xf97e0003,
- 0x00030005,
- 0x0006137e,
- 0x7e03e84e,
+ 0x1711f404,
+ 0x7e13884e,
0x03000058,
- 0x05f97e01,
+ 0x05f47e00,
0x13884e00,
0x0000587e,
- 0x137e0103,
- 0x884e0006,
- 0x00587e13,
-/* 0x06ff: i2c_bitw */
- 0x7e00f800,
- 0x4e000613,
- 0x587e03e8,
- 0x76bb0000,
+/* 0x0738: i2c_bitw_out */
+/* 0x073a: i2c_bitr */
+ 0x010300f8,
+ 0x00060e7e,
+ 0x7e03e84e,
+ 0xbb000058,
+ 0x65b60076,
+ 0x9450f904,
+ 0x56bb0465,
+ 0xfd50bd02,
+ 0x50fc0475,
+ 0x0006507e,
+ 0xf40464b6,
+ 0x3c7e1a11,
+ 0x00030006,
+ 0x0005f47e,
+ 0x7e13884e,
+ 0xf0000058,
+ 0x31f4013c,
+/* 0x077d: i2c_bitr_done */
+/* 0x077f: i2c_get_byte */
+ 0x0500f801,
+/* 0x0783: i2c_get_byte_next */
+ 0xb6080400,
+ 0x76bb0154,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb6000655,
+ 0xb600073a,
0x11f40464,
- 0x13884e17,
- 0x0000587e,
- 0xf97e0003,
- 0x884e0005,
- 0x00587e13,
-/* 0x073d: i2c_bitw_out */
-/* 0x073f: i2c_bitr */
- 0x0300f800,
- 0x06137e01,
- 0x03e84e00,
- 0x0000587e,
+ 0x0553fd2a,
+ 0xf40142b6,
+ 0x0103d81b,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x06557e50,
+ 0x06fa7e50,
0x0464b600,
- 0x7e1a11f4,
- 0x03000641,
- 0x05f97e00,
- 0x13884e00,
- 0x0000587e,
- 0xf4013cf0,
-/* 0x0782: i2c_bitr_done */
- 0x00f80131,
-/* 0x0784: i2c_get_byte */
- 0x08040005,
-/* 0x0788: i2c_get_byte_next */
- 0xbb0154b6,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x00073f7e,
- 0xf40464b6,
- 0x53fd2a11,
- 0x0142b605,
- 0x03d81bf4,
- 0x0076bb01,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xff7e50fc,
- 0x64b60006,
-/* 0x07d1: i2c_get_byte_done */
-/* 0x07d3: i2c_put_byte */
- 0x0400f804,
-/* 0x07d5: i2c_put_byte_next */
- 0x0142b608,
- 0xbb3854ff,
+/* 0x07cc: i2c_get_byte_done */
+/* 0x07ce: i2c_put_byte */
+ 0x080400f8,
+/* 0x07d0: i2c_put_byte_next */
+ 0xff0142b6,
+ 0x76bb3854,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb60006fa,
+ 0x11f40464,
+ 0x0046b034,
+ 0xbbd81bf4,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0006ff7e,
+ 0x00073a7e,
0xf40464b6,
- 0x46b03411,
- 0xd81bf400,
+ 0x76bb0f11,
+ 0x0136b000,
+ 0xf4061bf4,
+/* 0x0826: i2c_put_byte_done */
+ 0x00f80132,
+/* 0x0828: i2c_addr */
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0x073f7e50,
+ 0x06737e50,
0x0464b600,
- 0xbb0f11f4,
- 0x36b00076,
- 0x061bf401,
-/* 0x082b: i2c_put_byte_done */
- 0xf80132f4,
-/* 0x082d: i2c_addr */
- 0x0076bb00,
+ 0xe72911f4,
+ 0xb6012ec3,
+ 0x53fd0134,
+ 0x0076bb05,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
- 0x787e50fc,
- 0x64b60006,
- 0x2911f404,
- 0x012ec3e7,
- 0xfd0134b6,
- 0x76bb0553,
- 0x0465b600,
- 0x659450f9,
- 0x0256bb04,
- 0x75fd50bd,
- 0x7e50fc04,
- 0xb60007d3,
-/* 0x0872: i2c_addr_done */
- 0x00f80464,
-/* 0x0874: i2c_acquire_addr */
- 0xb6f8cec7,
- 0xe0b705e4,
- 0x00f8d014,
-/* 0x0880: i2c_acquire */
- 0x0008747e,
+ 0xce7e50fc,
+ 0x64b60007,
+/* 0x086d: i2c_addr_done */
+/* 0x086f: i2c_acquire_addr */
+ 0xc700f804,
+ 0xe4b6f8ce,
+ 0x14e0b705,
+/* 0x087b: i2c_acquire */
+ 0x7e00f8d0,
+ 0x7e00086f,
+ 0xf0000004,
+ 0x2d7e03d9,
+ 0x00f80000,
+/* 0x088c: i2c_release */
+ 0x00086f7e,
0x0000047e,
- 0x7e03d9f0,
+ 0x7e03daf0,
0xf800002d,
-/* 0x0891: i2c_release */
- 0x08747e00,
- 0x00047e00,
- 0x03daf000,
- 0x00002d7e,
-/* 0x08a2: i2c_recv */
- 0x32f400f8,
- 0xf8c1c701,
- 0xb00214b6,
- 0x1ff52816,
- 0x13b80134,
- 0x98000cf4,
- 0x13b80032,
- 0x98000ccc,
- 0x31f40031,
- 0xf9d0f902,
- 0xd6d0f9e0,
- 0x10000000,
- 0xbb016792,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x0008807e,
- 0xfc0464b6,
- 0x00d6b0d0,
- 0x00b01bf5,
- 0x76bb0005,
+/* 0x089d: i2c_recv */
+ 0x0132f400,
+ 0xb6f8c1c7,
+ 0x16b00214,
+ 0x341ff528,
+ 0xf413b801,
+ 0x3298000c,
+ 0xcc13b800,
+ 0x3198000c,
+ 0x0231f400,
+ 0xe0f9d0f9,
+ 0x00d6d0f9,
+ 0x92100000,
+ 0x76bb0167,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0x7e50fc04,
- 0xb600082d,
- 0x11f50464,
- 0xc5c700cc,
- 0x0076bbe0,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0xd37e50fc,
- 0x64b60007,
- 0xa911f504,
- 0xbb010500,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x00082d7e,
- 0xf50464b6,
- 0xbb008711,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x0007847e,
- 0xf40464b6,
- 0x5bcb6711,
- 0x0076bbe0,
+ 0xb600087b,
+ 0xd0fc0464,
+ 0xf500d6b0,
+ 0x0500b01b,
+ 0x0076bb00,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
- 0xd07e50fc,
- 0x64b60006,
- 0xbd5bb204,
- 0x410ef474,
-/* 0x09a4: i2c_recv_not_rd08 */
- 0xf401d6b0,
- 0x00053b1b,
- 0x00082d7e,
- 0xc73211f4,
- 0xd37ee0c5,
- 0x11f40007,
- 0x7e000528,
- 0xf400082d,
- 0xb5c71f11,
- 0x07d37ee0,
- 0x1511f400,
- 0x0006d07e,
- 0xc5c774bd,
- 0x091bf408,
- 0xf40232f4,
-/* 0x09e2: i2c_recv_not_wr08 */
-/* 0x09e2: i2c_recv_done */
- 0xcec7030e,
- 0x08917ef8,
- 0xfce0fc00,
- 0x0912f4d0,
- 0x9f7e7cb2,
-/* 0x09f6: i2c_recv_exit */
- 0x00f80002,
-/* 0x09f8: i2c_init */
-/* 0x09fa: test_recv */
- 0x584100f8,
- 0x0011cf04,
- 0x400110b6,
- 0x01f60458,
- 0xde04bd00,
- 0x134fd900,
- 0x0001de7e,
-/* 0x0a16: test_init */
- 0x004e00f8,
- 0x01de7e08,
-/* 0x0a1f: idle_recv */
+ 0x287e50fc,
+ 0x64b60008,
+ 0xcc11f504,
+ 0xe0c5c700,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x07ce7e50,
+ 0x0464b600,
+ 0x00a911f5,
+ 0x76bb0105,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb6000828,
+ 0x11f50464,
+ 0x76bb0087,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0x7e50fc04,
+ 0xb600077f,
+ 0x11f40464,
+ 0xe05bcb67,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x06cb7e50,
+ 0x0464b600,
+ 0x74bd5bb2,
+/* 0x099f: i2c_recv_not_rd08 */
+ 0xb0410ef4,
+ 0x1bf401d6,
+ 0x7e00053b,
+ 0xf4000828,
+ 0xc5c73211,
+ 0x07ce7ee0,
+ 0x2811f400,
+ 0x287e0005,
+ 0x11f40008,
+ 0xe0b5c71f,
+ 0x0007ce7e,
+ 0x7e1511f4,
+ 0xbd0006cb,
+ 0x08c5c774,
+ 0xf4091bf4,
+ 0x0ef40232,
+/* 0x09dd: i2c_recv_not_wr08 */
+/* 0x09dd: i2c_recv_done */
+ 0xf8cec703,
+ 0x00088c7e,
+ 0xd0fce0fc,
+ 0xb20912f4,
+ 0x029f7e7c,
+/* 0x09f1: i2c_recv_exit */
+/* 0x09f3: i2c_init */
0xf800f800,
-/* 0x0a21: idle */
- 0x0031f400,
- 0xcf045441,
- 0x10b60011,
- 0x04544001,
- 0xbd0001f6,
-/* 0x0a35: idle_loop */
- 0xf4580104,
-/* 0x0a3a: idle_proc */
-/* 0x0a3a: idle_proc_exec */
- 0x10f90232,
- 0xa87e1eb2,
- 0x10fc0002,
- 0xf40911f4,
- 0x0ef40231,
-/* 0x0a4d: idle_proc_next */
- 0x5810b6f0,
- 0x1bf41fa6,
- 0xe002f4e8,
- 0xf40028f4,
- 0x0000c60e,
+/* 0x09f5: test_recv */
+ 0x04584100,
+ 0xb60011cf,
+ 0x58400110,
+ 0x0001f604,
+ 0x00de04bd,
+ 0x7e134fd9,
+ 0xf80001de,
+/* 0x0a11: test_init */
+ 0x08004e00,
+ 0x0001de7e,
+/* 0x0a1a: idle_recv */
+ 0x00f800f8,
+/* 0x0a1c: idle */
+ 0x410031f4,
+ 0x11cf0454,
+ 0x0110b600,
+ 0xf6045440,
+ 0x04bd0001,
+/* 0x0a30: idle_loop */
+ 0x32f45801,
+/* 0x0a35: idle_proc */
+/* 0x0a35: idle_proc_exec */
+ 0xb210f902,
+ 0x02a87e1e,
+ 0xf410fc00,
+ 0x31f40911,
+ 0xf00ef402,
+/* 0x0a48: idle_proc_next */
+ 0xa65810b6,
+ 0xe81bf41f,
+ 0xf4e002f4,
+ 0x0ef40028,
+ 0x000000c6,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
index 6a2572e8945a..defddf5957ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
@@ -47,8 +47,8 @@ static uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x584d454d,
- 0x0000083a,
- 0x0000082c,
+ 0x00000833,
+ 0x00000825,
0x00000000,
0x00000000,
0x00000000,
@@ -69,8 +69,8 @@ static uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x46524550,
- 0x0000083e,
- 0x0000083c,
+ 0x00000837,
+ 0x00000835,
0x00000000,
0x00000000,
0x00000000,
@@ -91,8 +91,8 @@ static uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x5f433249,
- 0x00000c6e,
- 0x00000b11,
+ 0x00000c67,
+ 0x00000b0a,
0x00000000,
0x00000000,
0x00000000,
@@ -113,8 +113,8 @@ static uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x54534554,
- 0x00000c97,
- 0x00000c70,
+ 0x00000c90,
+ 0x00000c69,
0x00000000,
0x00000000,
0x00000000,
@@ -135,8 +135,8 @@ static uint32_t gt215_pmu_data[] = {
0x00000000,
0x00000000,
0x454c4449,
- 0x00000ca3,
- 0x00000ca1,
+ 0x00000c9c,
+ 0x00000c9a,
0x00000000,
0x00000000,
0x00000000,
@@ -234,22 +234,22 @@ static uint32_t gt215_pmu_data[] = {
/* 0x037c: memx_func_next */
0x00000002,
0x00000000,
- 0x000005a0,
+ 0x0000059f,
0x00000003,
0x00000002,
- 0x00000632,
+ 0x0000062f,
0x00040004,
0x00000000,
- 0x0000064e,
+ 0x0000064b,
0x00010005,
0x00000000,
- 0x0000066b,
+ 0x00000668,
0x00010006,
0x00000000,
- 0x000005f0,
+ 0x000005ef,
0x00000007,
0x00000000,
- 0x00000676,
+ 0x00000673,
/* 0x03c4: memx_func_tail */
/* 0x03c4: memx_ts_start */
0x00000000,
@@ -1305,560 +1305,560 @@ static uint32_t gt215_pmu_code[] = {
0x67f102d7,
0x63f1fffc,
0x76fdffff,
- 0x0267f104,
- 0x0576fd00,
- 0x70f980f9,
- 0xe0fcd0fc,
- 0xf04021f4,
+ 0x0267f004,
+ 0xf90576fd,
+ 0xfc70f980,
+ 0xf4e0fcd0,
+ 0x67f04021,
+ 0xe007f104,
+ 0x0604b607,
+ 0xbd0006d0,
+/* 0x0581: memx_func_enter_wait */
+ 0xc067f104,
+ 0x0664b607,
+ 0xf00066cf,
+ 0x0bf40464,
+ 0x2c67f0f3,
+ 0xcf0664b6,
+ 0x06800066,
+/* 0x059f: memx_func_leave */
+ 0xf000f8f1,
+ 0x64b62c67,
+ 0x0066cf06,
+ 0xf0f20680,
0x07f10467,
- 0x04b607e0,
+ 0x04b607e4,
0x0006d006,
-/* 0x0582: memx_func_enter_wait */
+/* 0x05ba: memx_func_leave_wait */
0x67f104bd,
0x64b607c0,
0x0066cf06,
0xf40464f0,
- 0x67f0f30b,
- 0x0664b62c,
- 0x800066cf,
- 0x00f8f106,
-/* 0x05a0: memx_func_leave */
- 0xb62c67f0,
- 0x66cf0664,
- 0xf2068000,
- 0xf10467f0,
- 0xb607e407,
- 0x06d00604,
-/* 0x05bb: memx_func_leave_wait */
- 0xf104bd00,
- 0xb607c067,
- 0x66cf0664,
- 0x0464f000,
- 0xf1f31bf4,
- 0xb9161087,
- 0x21f4028e,
- 0x02d7b904,
- 0xffcc67f1,
- 0xffff63f1,
- 0xf90476fd,
- 0xfc70f980,
- 0xf4e0fcd0,
- 0x00f84021,
-/* 0x05f0: memx_func_wait_vblank */
- 0xb0001698,
- 0x0bf40066,
- 0x0166b013,
- 0xf4060bf4,
-/* 0x0602: memx_func_wait_vblank_head1 */
- 0x77f12e0e,
- 0x0ef40020,
-/* 0x0609: memx_func_wait_vblank_head0 */
- 0x0877f107,
-/* 0x060d: memx_func_wait_vblank_0 */
- 0xc467f100,
- 0x0664b607,
- 0xfd0066cf,
- 0x1bf40467,
-/* 0x061d: memx_func_wait_vblank_1 */
- 0xc467f1f3,
- 0x0664b607,
- 0xfd0066cf,
- 0x0bf40467,
-/* 0x062d: memx_func_wait_vblank_fini */
- 0x0410b6f3,
-/* 0x0632: memx_func_wr32 */
- 0x169800f8,
- 0x01159800,
- 0xf90810b6,
- 0xfc50f960,
- 0xf4e0fcd0,
- 0x42b64021,
- 0xe91bf402,
-/* 0x064e: memx_func_wait */
- 0x87f000f8,
- 0x0684b62c,
- 0x980088cf,
- 0x1d98001e,
- 0x021c9801,
- 0xb6031b98,
- 0x21f41010,
-/* 0x066b: memx_func_delay */
- 0x9800f8a3,
- 0x10b6001e,
- 0x7e21f404,
-/* 0x0676: memx_func_train */
- 0x57f100f8,
- 0x77f10003,
- 0x97f10000,
- 0x93f00000,
- 0x029eb970,
- 0xb90421f4,
- 0xe7f102d8,
- 0x21f42710,
-/* 0x0695: memx_func_train_loop_outer */
- 0x0158e07e,
- 0x0083f101,
- 0xe097f102,
- 0x1193f011,
- 0x80f990f9,
+ 0x87f1f31b,
+ 0x8eb91610,
+ 0x0421f402,
+ 0xf102d7b9,
+ 0xf1ffcc67,
+ 0xfdffff63,
+ 0x80f90476,
+ 0xd0fc70f9,
+ 0x21f4e0fc,
+/* 0x05ef: memx_func_wait_vblank */
+ 0x9800f840,
+ 0x66b00016,
+ 0x120bf400,
+ 0xf40166b0,
+ 0x0ef4060b,
+/* 0x0601: memx_func_wait_vblank_head1 */
+ 0x2077f02c,
+/* 0x0607: memx_func_wait_vblank_head0 */
+ 0xf0060ef4,
+/* 0x060a: memx_func_wait_vblank_0 */
+ 0x67f10877,
+ 0x64b607c4,
+ 0x0066cf06,
+ 0xf40467fd,
+/* 0x061a: memx_func_wait_vblank_1 */
+ 0x67f1f31b,
+ 0x64b607c4,
+ 0x0066cf06,
+ 0xf40467fd,
+/* 0x062a: memx_func_wait_vblank_fini */
+ 0x10b6f30b,
+/* 0x062f: memx_func_wr32 */
+ 0x9800f804,
+ 0x15980016,
+ 0x0810b601,
+ 0x50f960f9,
0xe0fcd0fc,
- 0xf94021f4,
- 0x0067f150,
-/* 0x06b5: memx_func_train_loop_inner */
- 0x1187f100,
- 0x9068ff11,
- 0xfd109894,
- 0x97f10589,
- 0x93f00720,
- 0xf990f910,
- 0xfcd0fc80,
- 0x4021f4e0,
- 0x008097f1,
- 0xb91093f0,
- 0x21f4029e,
- 0x02d8b904,
- 0xf92088c5,
+ 0xb64021f4,
+ 0x1bf40242,
+/* 0x064b: memx_func_wait */
+ 0xf000f8e9,
+ 0x84b62c87,
+ 0x0088cf06,
+ 0x98001e98,
+ 0x1c98011d,
+ 0x031b9802,
+ 0xf41010b6,
+ 0x00f8a321,
+/* 0x0668: memx_func_delay */
+ 0xb6001e98,
+ 0x21f40410,
+/* 0x0673: memx_func_train */
+ 0xf000f87e,
+ 0x77f00357,
+ 0x0097f100,
+ 0x7093f000,
+ 0xf4029eb9,
+ 0xd8b90421,
+ 0x10e7f102,
+ 0x7e21f427,
+/* 0x0690: memx_func_train_loop_outer */
+ 0x010158e0,
+ 0x020083f1,
+ 0x11e097f1,
+ 0xf91193f0,
+ 0xfc80f990,
+ 0xf4e0fcd0,
+ 0x50f94021,
+/* 0x06af: memx_func_train_loop_inner */
+ 0xf10067f0,
+ 0xff111187,
+ 0x98949068,
+ 0x0589fd10,
+ 0x072097f1,
+ 0xf91093f0,
0xfc80f990,
0xf4e0fcd0,
0x97f14021,
- 0x93f0053c,
- 0x0287f110,
- 0x0083f130,
- 0xf990f980,
+ 0x93f00080,
+ 0x029eb910,
+ 0xb90421f4,
+ 0x88c502d8,
+ 0xf990f920,
0xfcd0fc80,
0x4021f4e0,
- 0x0560e7f1,
- 0xf110e3f0,
- 0xf10000d7,
- 0x908000d3,
- 0xb7f100dc,
- 0xb3f08480,
- 0xa321f41e,
- 0x000057f1,
- 0xffff97f1,
- 0x830093f1,
-/* 0x0734: memx_func_train_loop_4x */
- 0x0080a7f1,
- 0xb910a3f0,
- 0x21f402ae,
- 0x02d8b904,
- 0xffdfb7f1,
- 0xffffb3f1,
- 0xf9048bfd,
- 0xfc80f9a0,
+ 0x053c97f1,
+ 0xf11093f0,
+ 0xf1300287,
+ 0xf9800083,
+ 0xfc80f990,
0xf4e0fcd0,
- 0xa7f14021,
- 0xa3f0053c,
- 0x0287f110,
- 0x0083f130,
- 0xf9a0f980,
- 0xfcd0fc80,
- 0x4021f4e0,
- 0x0560e7f1,
- 0xf110e3f0,
- 0xf10000d7,
- 0xb98000d3,
- 0xb7f102dc,
- 0xb3f02710,
- 0xa321f400,
- 0xf402eeb9,
- 0xddb90421,
- 0x949dff02,
+ 0xe7f14021,
+ 0xe3f00560,
+ 0x00d7f110,
+ 0x00d3f100,
+ 0x00dc9080,
+ 0x8480b7f1,
+ 0xf41eb3f0,
+ 0x57f0a321,
+ 0xff97f100,
+ 0x0093f1ff,
+/* 0x072d: memx_func_train_loop_4x */
+ 0x80a7f183,
+ 0x10a3f000,
+ 0xf402aeb9,
+ 0xd8b90421,
+ 0xdfb7f102,
+ 0xffb3f1ff,
+ 0x048bfdff,
+ 0x80f9a0f9,
+ 0xe0fcd0fc,
+ 0xf14021f4,
+ 0xf0053ca7,
+ 0x87f110a3,
+ 0x83f13002,
+ 0xa0f98000,
+ 0xd0fc80f9,
+ 0x21f4e0fc,
+ 0x60e7f140,
+ 0x10e3f005,
+ 0x0000d7f1,
+ 0x8000d3f1,
+ 0xf102dcb9,
+ 0xf02710b7,
+ 0x21f400b3,
+ 0x02eeb9a3,
+ 0xb90421f4,
+ 0x9dff02dd,
+ 0x0150b694,
+ 0xf4045670,
+ 0x7aa0921e,
+ 0xa9800bcc,
+ 0x0160b600,
+ 0x700470b6,
+ 0x1ef51066,
+ 0x50fcff01,
0x700150b6,
- 0x1ef40456,
- 0xcc7aa092,
- 0x00a9800b,
- 0xb60160b6,
- 0x66700470,
- 0x001ef510,
- 0xb650fcff,
- 0x56700150,
- 0xd41ef507,
-/* 0x07c7: memx_exec */
- 0xf900f8fe,
- 0xb9d0f9e0,
- 0xb2b902c1,
-/* 0x07d1: memx_exec_next */
- 0x00139802,
- 0xe70410b6,
- 0xe701f034,
- 0xb601e033,
- 0x30f00132,
- 0xde35980c,
- 0x12b855f9,
- 0xe41ef406,
- 0x98f10b98,
- 0xcbbbf20c,
- 0xc4b7f102,
- 0x06b4b607,
- 0xfc00bbcf,
- 0xf5e0fcd0,
+ 0x1ef50756,
+ 0x00f8fed6,
+/* 0x07c0: memx_exec */
+ 0xd0f9e0f9,
+ 0xb902c1b9,
+/* 0x07ca: memx_exec_next */
+ 0x139802b2,
+ 0x0410b600,
+ 0x01f034e7,
+ 0x01e033e7,
+ 0xf00132b6,
+ 0x35980c30,
+ 0xb855f9de,
+ 0x1ef40612,
+ 0xf10b98e4,
+ 0xbbf20c98,
+ 0xb7f102cb,
+ 0xb4b607c4,
+ 0x00bbcf06,
+ 0xe0fcd0fc,
+ 0x033621f5,
+/* 0x0806: memx_info */
+ 0xc67000f8,
+ 0x0e0bf401,
+/* 0x080c: memx_info_data */
+ 0x03ccc7f1,
+ 0x0800b7f1,
+/* 0x0817: memx_info_train */
+ 0xf10b0ef4,
+ 0xf10bccc7,
+/* 0x081f: memx_info_send */
+ 0xf50100b7,
0xf8033621,
-/* 0x080d: memx_info */
- 0x01c67000,
-/* 0x0813: memx_info_data */
- 0xf10e0bf4,
- 0xf103ccc7,
- 0xf40800b7,
-/* 0x081e: memx_info_train */
- 0xc7f10b0e,
- 0xb7f10bcc,
-/* 0x0826: memx_info_send */
- 0x21f50100,
- 0x00f80336,
-/* 0x082c: memx_recv */
- 0xf401d6b0,
- 0xd6b0980b,
- 0xd80bf400,
-/* 0x083a: memx_init */
- 0x00f800f8,
-/* 0x083c: perf_recv */
-/* 0x083e: perf_init */
- 0x00f800f8,
-/* 0x0840: i2c_drive_scl */
- 0xf40036b0,
- 0x07f1110b,
- 0x04b607e0,
- 0x0001d006,
- 0x00f804bd,
-/* 0x0854: i2c_drive_scl_lo */
- 0x07e407f1,
- 0xd00604b6,
- 0x04bd0001,
-/* 0x0862: i2c_drive_sda */
- 0x36b000f8,
- 0x110bf400,
- 0x07e007f1,
- 0xd00604b6,
- 0x04bd0002,
-/* 0x0876: i2c_drive_sda_lo */
- 0x07f100f8,
- 0x04b607e4,
- 0x0002d006,
- 0x00f804bd,
-/* 0x0884: i2c_sense_scl */
- 0xf10132f4,
- 0xb607c437,
- 0x33cf0634,
- 0x0431fd00,
- 0xf4060bf4,
-/* 0x089a: i2c_sense_scl_done */
- 0x00f80131,
-/* 0x089c: i2c_sense_sda */
- 0xf10132f4,
- 0xb607c437,
- 0x33cf0634,
- 0x0432fd00,
- 0xf4060bf4,
-/* 0x08b2: i2c_sense_sda_done */
- 0x00f80131,
-/* 0x08b4: i2c_raise_scl */
- 0x47f140f9,
- 0x37f00898,
- 0x4021f501,
-/* 0x08c1: i2c_raise_scl_wait */
+/* 0x0825: memx_recv */
+ 0x01d6b000,
+ 0xb0980bf4,
+ 0x0bf400d6,
+/* 0x0833: memx_init */
+ 0xf800f8d8,
+/* 0x0835: perf_recv */
+/* 0x0837: perf_init */
+ 0xf800f800,
+/* 0x0839: i2c_drive_scl */
+ 0x0036b000,
+ 0xf1110bf4,
+ 0xb607e007,
+ 0x01d00604,
+ 0xf804bd00,
+/* 0x084d: i2c_drive_scl_lo */
+ 0xe407f100,
+ 0x0604b607,
+ 0xbd0001d0,
+/* 0x085b: i2c_drive_sda */
+ 0xb000f804,
+ 0x0bf40036,
+ 0xe007f111,
+ 0x0604b607,
+ 0xbd0002d0,
+/* 0x086f: i2c_drive_sda_lo */
+ 0xf100f804,
+ 0xb607e407,
+ 0x02d00604,
+ 0xf804bd00,
+/* 0x087d: i2c_sense_scl */
+ 0x0132f400,
+ 0x07c437f1,
+ 0xcf0634b6,
+ 0x31fd0033,
+ 0x060bf404,
+/* 0x0893: i2c_sense_scl_done */
+ 0xf80131f4,
+/* 0x0895: i2c_sense_sda */
+ 0x0132f400,
+ 0x07c437f1,
+ 0xcf0634b6,
+ 0x32fd0033,
+ 0x060bf404,
+/* 0x08ab: i2c_sense_sda_done */
+ 0xf80131f4,
+/* 0x08ad: i2c_raise_scl */
+ 0xf140f900,
+ 0xf0089847,
+ 0x21f50137,
+/* 0x08ba: i2c_raise_scl_wait */
+ 0xe7f10839,
+ 0x21f403e8,
+ 0x7d21f57e,
+ 0x0901f408,
+ 0xf40142b6,
+/* 0x08ce: i2c_raise_scl_done */
+ 0x40fcef1b,
+/* 0x08d2: i2c_start */
+ 0x21f500f8,
+ 0x11f4087d,
+ 0x9521f50d,
+ 0x0611f408,
+/* 0x08e3: i2c_start_rep */
+ 0xf0300ef4,
+ 0x21f50037,
+ 0x37f00839,
+ 0x5b21f501,
+ 0x0076bb08,
+ 0xf90465b6,
+ 0x04659450,
+ 0xbd0256bb,
+ 0x0475fd50,
+ 0x21f550fc,
+ 0x64b608ad,
+ 0x1f11f404,
+/* 0x0910: i2c_start_send */
+ 0xf50037f0,
+ 0xf1085b21,
+ 0xf41388e7,
+ 0x37f07e21,
+ 0x3921f500,
+ 0x88e7f108,
+ 0x7e21f413,
+/* 0x092c: i2c_start_out */
+/* 0x092e: i2c_stop */
+ 0x37f000f8,
+ 0x3921f500,
+ 0x0037f008,
+ 0x085b21f5,
+ 0x03e8e7f1,
+ 0xf07e21f4,
+ 0x21f50137,
+ 0xe7f10839,
+ 0x21f41388,
+ 0x0137f07e,
+ 0x085b21f5,
+ 0x1388e7f1,
+ 0xf87e21f4,
+/* 0x0961: i2c_bitw */
+ 0x5b21f500,
0xe8e7f108,
0x7e21f403,
- 0x088421f5,
- 0xb60901f4,
- 0x1bf40142,
-/* 0x08d5: i2c_raise_scl_done */
- 0xf840fcef,
-/* 0x08d9: i2c_start */
- 0x8421f500,
- 0x0d11f408,
- 0x089c21f5,
- 0xf40611f4,
-/* 0x08ea: i2c_start_rep */
- 0x37f0300e,
- 0x4021f500,
- 0x0137f008,
- 0x086221f5,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xb421f550,
+ 0xad21f550,
0x0464b608,
-/* 0x0917: i2c_start_send */
- 0xf01f11f4,
- 0x21f50037,
- 0xe7f10862,
- 0x21f41388,
- 0x0037f07e,
- 0x084021f5,
- 0x1388e7f1,
-/* 0x0933: i2c_start_out */
- 0xf87e21f4,
-/* 0x0935: i2c_stop */
- 0x0037f000,
- 0x084021f5,
- 0xf50037f0,
- 0xf1086221,
- 0xf403e8e7,
+ 0xf11811f4,
+ 0xf41388e7,
0x37f07e21,
- 0x4021f501,
+ 0x3921f500,
0x88e7f108,
0x7e21f413,
- 0xf50137f0,
- 0xf1086221,
- 0xf41388e7,
- 0x00f87e21,
-/* 0x0968: i2c_bitw */
- 0x086221f5,
- 0x03e8e7f1,
- 0xbb7e21f4,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x08b421f5,
- 0xf40464b6,
- 0xe7f11811,
+/* 0x09a0: i2c_bitw_out */
+/* 0x09a2: i2c_bitr */
+ 0x37f000f8,
+ 0x5b21f501,
+ 0xe8e7f108,
+ 0x7e21f403,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xad21f550,
+ 0x0464b608,
+ 0xf51b11f4,
+ 0xf0089521,
+ 0x21f50037,
+ 0xe7f10839,
0x21f41388,
- 0x0037f07e,
- 0x084021f5,
- 0x1388e7f1,
-/* 0x09a7: i2c_bitw_out */
- 0xf87e21f4,
-/* 0x09a9: i2c_bitr */
- 0x0137f000,
- 0x086221f5,
- 0x03e8e7f1,
- 0xbb7e21f4,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x08b421f5,
- 0xf40464b6,
- 0x21f51b11,
- 0x37f0089c,
- 0x4021f500,
- 0x88e7f108,
- 0x7e21f413,
- 0xf4013cf0,
-/* 0x09ee: i2c_bitr_done */
- 0x00f80131,
-/* 0x09f0: i2c_get_byte */
- 0xf00057f0,
-/* 0x09f6: i2c_get_byte_next */
- 0x54b60847,
+ 0x013cf07e,
+/* 0x09e7: i2c_bitr_done */
+ 0xf80131f4,
+/* 0x09e9: i2c_get_byte */
+ 0x0057f000,
+/* 0x09ef: i2c_get_byte_next */
+ 0xb60847f0,
+ 0x76bb0154,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb609a221,
+ 0x11f40464,
+ 0x0553fd2b,
+ 0xf40142b6,
+ 0x37f0d81b,
0x0076bb01,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b609a9,
- 0x2b11f404,
- 0xb60553fd,
- 0x1bf40142,
- 0x0137f0d8,
- 0xb60076bb,
- 0x50f90465,
- 0xbb046594,
- 0x50bd0256,
- 0xfc0475fd,
- 0x6821f550,
- 0x0464b609,
-/* 0x0a40: i2c_get_byte_done */
-/* 0x0a42: i2c_put_byte */
- 0x47f000f8,
-/* 0x0a45: i2c_put_byte_next */
- 0x0142b608,
- 0xbb3854ff,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x096821f5,
- 0xf40464b6,
- 0x46b03411,
- 0xd81bf400,
+ 0x64b60961,
+/* 0x0a39: i2c_get_byte_done */
+/* 0x0a3b: i2c_put_byte */
+ 0xf000f804,
+/* 0x0a3e: i2c_put_byte_next */
+ 0x42b60847,
+ 0x3854ff01,
0xb60076bb,
0x50f90465,
0xbb046594,
0x50bd0256,
0xfc0475fd,
- 0xa921f550,
+ 0x6121f550,
0x0464b609,
- 0xbb0f11f4,
- 0x36b00076,
- 0x061bf401,
-/* 0x0a9b: i2c_put_byte_done */
- 0xf80132f4,
-/* 0x0a9d: i2c_addr */
- 0x0076bb00,
+ 0xb03411f4,
+ 0x1bf40046,
+ 0x0076bbd8,
0xf90465b6,
0x04659450,
0xbd0256bb,
0x0475fd50,
0x21f550fc,
- 0x64b608d9,
- 0x2911f404,
- 0x012ec3e7,
- 0xfd0134b6,
- 0x76bb0553,
+ 0x64b609a2,
+ 0x0f11f404,
+ 0xb00076bb,
+ 0x1bf40136,
+ 0x0132f406,
+/* 0x0a94: i2c_put_byte_done */
+/* 0x0a96: i2c_addr */
+ 0x76bb00f8,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb60a4221,
-/* 0x0ae2: i2c_addr_done */
- 0x00f80464,
-/* 0x0ae4: i2c_acquire_addr */
- 0xb6f8cec7,
- 0xe0b702e4,
- 0xee980d1c,
-/* 0x0af3: i2c_acquire */
- 0xf500f800,
- 0xf40ae421,
- 0xd9f00421,
- 0x4021f403,
-/* 0x0b02: i2c_release */
- 0x21f500f8,
- 0x21f40ae4,
- 0x03daf004,
- 0xf84021f4,
-/* 0x0b11: i2c_recv */
- 0x0132f400,
- 0xb6f8c1c7,
- 0x16b00214,
- 0x3a1ff528,
- 0xf413a001,
- 0x0032980c,
- 0x0ccc13a0,
- 0xf4003198,
- 0xd0f90231,
- 0xd0f9e0f9,
- 0x000067f1,
- 0x100063f1,
- 0xbb016792,
+ 0xb608d221,
+ 0x11f40464,
+ 0x2ec3e729,
+ 0x0134b601,
+ 0xbb0553fd,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0af321f5,
- 0xfc0464b6,
- 0x00d6b0d0,
- 0x00b31bf5,
- 0xbb0057f0,
+ 0x0a3b21f5,
+/* 0x0adb: i2c_addr_done */
+ 0xf80464b6,
+/* 0x0add: i2c_acquire_addr */
+ 0xf8cec700,
+ 0xb702e4b6,
+ 0x980d1ce0,
+ 0x00f800ee,
+/* 0x0aec: i2c_acquire */
+ 0x0add21f5,
+ 0xf00421f4,
+ 0x21f403d9,
+/* 0x0afb: i2c_release */
+ 0xf500f840,
+ 0xf40add21,
+ 0xdaf00421,
+ 0x4021f403,
+/* 0x0b0a: i2c_recv */
+ 0x32f400f8,
+ 0xf8c1c701,
+ 0xb00214b6,
+ 0x1ff52816,
+ 0x13a0013a,
+ 0x32980cf4,
+ 0xcc13a000,
+ 0x0031980c,
+ 0xf90231f4,
+ 0xf9e0f9d0,
+ 0x0067f1d0,
+ 0x0063f100,
+ 0x01679210,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0xec21f550,
+ 0x0464b60a,
+ 0xd6b0d0fc,
+ 0xb31bf500,
+ 0x0057f000,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x9621f550,
+ 0x0464b60a,
+ 0x00d011f5,
+ 0xbbe0c5c7,
0x65b60076,
0x9450f904,
0x56bb0465,
0xfd50bd02,
0x50fc0475,
- 0x0a9d21f5,
+ 0x0a3b21f5,
0xf50464b6,
- 0xc700d011,
- 0x76bbe0c5,
+ 0xf000ad11,
+ 0x76bb0157,
0x0465b600,
0x659450f9,
0x0256bb04,
0x75fd50bd,
0xf550fc04,
- 0xb60a4221,
+ 0xb60a9621,
0x11f50464,
- 0x57f000ad,
- 0x0076bb01,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b60a9d,
- 0x8a11f504,
- 0x0076bb00,
- 0xf90465b6,
- 0x04659450,
- 0xbd0256bb,
- 0x0475fd50,
- 0x21f550fc,
- 0x64b609f0,
- 0x6a11f404,
- 0xbbe05bcb,
- 0x65b60076,
- 0x9450f904,
- 0x56bb0465,
- 0xfd50bd02,
- 0x50fc0475,
- 0x093521f5,
- 0xb90464b6,
- 0x74bd025b,
-/* 0x0c17: i2c_recv_not_rd08 */
- 0xb0430ef4,
- 0x1bf401d6,
- 0x0057f03d,
- 0x0a9d21f5,
- 0xc73311f4,
- 0x21f5e0c5,
- 0x11f40a42,
- 0x0057f029,
- 0x0a9d21f5,
- 0xc71f11f4,
- 0x21f5e0b5,
- 0x11f40a42,
- 0x3521f515,
- 0xc774bd09,
- 0x1bf408c5,
- 0x0232f409,
-/* 0x0c57: i2c_recv_not_wr08 */
-/* 0x0c57: i2c_recv_done */
- 0xc7030ef4,
- 0x21f5f8ce,
- 0xe0fc0b02,
- 0x12f4d0fc,
- 0x027cb90a,
- 0x033621f5,
-/* 0x0c6c: i2c_recv_exit */
-/* 0x0c6e: i2c_init */
+ 0x76bb008a,
+ 0x0465b600,
+ 0x659450f9,
+ 0x0256bb04,
+ 0x75fd50bd,
+ 0xf550fc04,
+ 0xb609e921,
+ 0x11f40464,
+ 0xe05bcb6a,
+ 0xb60076bb,
+ 0x50f90465,
+ 0xbb046594,
+ 0x50bd0256,
+ 0xfc0475fd,
+ 0x2e21f550,
+ 0x0464b609,
+ 0xbd025bb9,
+ 0x430ef474,
+/* 0x0c10: i2c_recv_not_rd08 */
+ 0xf401d6b0,
+ 0x57f03d1b,
+ 0x9621f500,
+ 0x3311f40a,
+ 0xf5e0c5c7,
+ 0xf40a3b21,
+ 0x57f02911,
+ 0x9621f500,
+ 0x1f11f40a,
+ 0xf5e0b5c7,
+ 0xf40a3b21,
+ 0x21f51511,
+ 0x74bd092e,
+ 0xf408c5c7,
+ 0x32f4091b,
+ 0x030ef402,
+/* 0x0c50: i2c_recv_not_wr08 */
+/* 0x0c50: i2c_recv_done */
+ 0xf5f8cec7,
+ 0xfc0afb21,
+ 0xf4d0fce0,
+ 0x7cb90a12,
+ 0x3621f502,
+/* 0x0c65: i2c_recv_exit */
+/* 0x0c67: i2c_init */
+ 0xf800f803,
+/* 0x0c69: test_recv */
+ 0xd817f100,
+ 0x0614b605,
+ 0xb60011cf,
+ 0x07f10110,
+ 0x04b605d8,
+ 0x0001d006,
+ 0xe7f104bd,
+ 0xe3f1d900,
+ 0x21f5134f,
+ 0x00f80256,
+/* 0x0c90: test_init */
+ 0x0800e7f1,
+ 0x025621f5,
+/* 0x0c9a: idle_recv */
0x00f800f8,
-/* 0x0c70: test_recv */
- 0x05d817f1,
- 0xcf0614b6,
- 0x10b60011,
- 0xd807f101,
- 0x0604b605,
- 0xbd0001d0,
- 0x00e7f104,
- 0x4fe3f1d9,
- 0x5621f513,
-/* 0x0c97: test_init */
- 0xf100f802,
- 0xf50800e7,
- 0xf8025621,
-/* 0x0ca1: idle_recv */
-/* 0x0ca3: idle */
- 0xf400f800,
- 0x17f10031,
- 0x14b605d4,
- 0x0011cf06,
- 0xf10110b6,
- 0xb605d407,
- 0x01d00604,
-/* 0x0cbf: idle_loop */
- 0xf004bd00,
- 0x32f45817,
-/* 0x0cc5: idle_proc */
-/* 0x0cc5: idle_proc_exec */
- 0xb910f902,
- 0x21f5021e,
- 0x10fc033f,
- 0xf40911f4,
- 0x0ef40231,
-/* 0x0cd9: idle_proc_next */
- 0x5810b6ef,
- 0xf4061fb8,
- 0x02f4e61b,
- 0x0028f4dd,
- 0x00bb0ef4,
+/* 0x0c9c: idle */
+ 0xf10031f4,
+ 0xb605d417,
+ 0x11cf0614,
+ 0x0110b600,
+ 0x05d407f1,
+ 0xd00604b6,
+ 0x04bd0001,
+/* 0x0cb8: idle_loop */
+ 0xf45817f0,
+/* 0x0cbe: idle_proc */
+/* 0x0cbe: idle_proc_exec */
+ 0x10f90232,
+ 0xf5021eb9,
+ 0xfc033f21,
+ 0x0911f410,
+ 0xf40231f4,
+/* 0x0cd2: idle_proc_next */
+ 0x10b6ef0e,
+ 0x061fb858,
+ 0xf4e61bf4,
+ 0x28f4dd02,
+ 0xbb0ef400,
+ 0x00000000,
+ 0x00000000,
0x00000000,
0x00000000,
0x00000000,
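The five odd-looking words in the gt215_pmu_data hunks above (0x584d454d, 0x46524550, 0x5f433249, 0x54534554, 0x454c4449) are process-name tags stored as little-endian ASCII, each followed by a pair of code offsets that this patch shifts because the reassembled firmware moved every symbol. Reading them as FourCC names is an inference from their byte values, not something the diff states; a standalone check:

#include <stdio.h>
#include <stdint.h>

static void print_tag(uint32_t tag)
{
	/* bytes come out lowest first, matching the blob's endianness */
	printf("%c%c%c%c\n", tag & 0xff, (tag >> 8) & 0xff,
	       (tag >> 16) & 0xff, (tag >> 24) & 0xff);
}

int main(void)
{
	print_tag(0x584d454d); /* MEMX */
	print_tag(0x46524550); /* PERF */
	print_tag(0x5f433249); /* I2C_ */
	print_tag(0x54534554); /* TEST */
	print_tag(0x454c4449); /* IDLE */
	return 0;
}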
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
index ec03f9a4290b..1663bf943d77 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
@@ -82,15 +82,15 @@ memx_train_tail:
// $r0 - zero
memx_func_enter:
#if NVKM_PPWR_CHIPSET == GT215
- movw $r8 0x1610
+ mov $r8 0x1610
nv_rd32($r7, $r8)
imm32($r6, 0xfffffffc)
and $r7 $r6
- movw $r6 0x2
+ mov $r6 0x2
or $r7 $r6
nv_wr32($r8, $r7)
#else
- movw $r6 0x001620
+ mov $r6 0x001620
imm32($r7, ~0x00000aa2);
nv_rd32($r8, $r6)
and $r8 $r7
@@ -101,7 +101,7 @@ memx_func_enter:
and $r8 $r7
nv_wr32($r6, $r8)
- movw $r6 0x0026f0
+ mov $r6 0x0026f0
nv_rd32($r8, $r6)
and $r8 $r7
nv_wr32($r6, $r8)
@@ -136,19 +136,19 @@ memx_func_leave:
bra nz #memx_func_leave_wait
#if NVKM_PPWR_CHIPSET == GT215
- movw $r8 0x1610
+ mov $r8 0x1610
nv_rd32($r7, $r8)
imm32($r6, 0xffffffcc)
and $r7 $r6
nv_wr32($r8, $r7)
#else
- movw $r6 0x0026f0
+ mov $r6 0x0026f0
imm32($r7, 0x00000001)
nv_rd32($r8, $r6)
or $r8 $r7
nv_wr32($r6, $r8)
- movw $r6 0x001620
+ mov $r6 0x001620
nv_rd32($r8, $r6)
or $r8 $r7
nv_wr32($r6, $r8)
@@ -177,11 +177,11 @@ memx_func_wait_vblank:
bra #memx_func_wait_vblank_fini
memx_func_wait_vblank_head1:
- movw $r7 0x20
+ mov $r7 0x20
bra #memx_func_wait_vblank_0
memx_func_wait_vblank_head0:
- movw $r7 0x8
+ mov $r7 0x8
memx_func_wait_vblank_0:
nv_iord($r6, NV_PPWR_INPUT)
@@ -273,13 +273,13 @@ memx_func_train:
// $r5 - outer loop counter
// $r6 - inner loop counter
// $r7 - entry counter (#memx_train_head + $r7)
- movw $r5 0x3
- movw $r7 0x0
+ mov $r5 0x3
+ mov $r7 0x0
// Read random memory to wake up... things
imm32($r9, 0x700000)
nv_rd32($r8,$r9)
- movw $r14 0x2710
+ mov $r14 0x2710
call(nsec)
memx_func_train_loop_outer:
@@ -289,9 +289,9 @@ memx_func_train:
nv_wr32($r9, $r8)
push $r5
- movw $r6 0x0
+ mov $r6 0x0
memx_func_train_loop_inner:
- movw $r8 0x1111
+ mov $r8 0x1111
mulu $r9 $r6 $r8
shl b32 $r8 $r9 0x10
or $r8 $r9
@@ -315,7 +315,7 @@ memx_func_train:
// $r5 - inner inner loop counter
// $r9 - result
- movw $r5 0
+ mov $r5 0
imm32($r9, 0x8300ffff)
memx_func_train_loop_4x:
imm32($r10, 0x100080)
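The memx.fuc hunk above mechanically swaps movw for the mov macro. A plausible reading, not stated in the diff, is that movw encodes a 16-bit immediate that the falcon sign-extends into the 32-bit register, so constants with bit 15 set would load corrupted, whereas mov lets the assembler pick an encoding wide enough for the operand. The C analogue of that hazard:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int16_t imm = (int16_t)0x9000;	/* 16-bit immediate with bit 15 set */
	uint32_t reg = (int32_t)imm;	/* widens by sign extension */

	printf("0x%08x\n", reg);	/* prints 0xffff9000, not 0x00009000 */
	return 0;
}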
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
index e698f4836521..ed08120eefe0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
@@ -7,8 +7,10 @@ nvkm-y += nvkm/subdev/secboot/acr_r352.o
nvkm-y += nvkm/subdev/secboot/acr_r361.o
nvkm-y += nvkm/subdev/secboot/acr_r364.o
nvkm-y += nvkm/subdev/secboot/acr_r367.o
+nvkm-y += nvkm/subdev/secboot/acr_r370.o
nvkm-y += nvkm/subdev/secboot/acr_r375.o
nvkm-y += nvkm/subdev/secboot/gm200.o
nvkm-y += nvkm/subdev/secboot/gm20b.o
nvkm-y += nvkm/subdev/secboot/gp102.o
+nvkm-y += nvkm/subdev/secboot/gp108.o
nvkm-y += nvkm/subdev/secboot/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
index b615fc81aca4..73a2ac81ac69 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
@@ -64,6 +64,7 @@ struct nvkm_acr *acr_r352_new(unsigned long);
struct nvkm_acr *acr_r361_new(unsigned long);
struct nvkm_acr *acr_r364_new(unsigned long);
struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
+struct nvkm_acr *acr_r370_new(enum nvkm_secboot_falcon, unsigned long);
struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
new file mode 100644
index 000000000000..2f890dfae7fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "acr_r370.h"
+#include "acr_r367.h"
+
+#include <core/msgqueue.h>
+#include <engine/falcon.h>
+#include <engine/sec2.h>
+
+static void
+acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
+ const struct ls_ucode_img *img, u64 wpr_addr,
+ void *_desc)
+{
+ struct acr_r370_flcn_bl_desc *desc = _desc;
+ const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
+ u64 base, addr_code, addr_data;
+
+ base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
+ addr_code = base + pdesc->app_resident_code_offset;
+ addr_data = base + pdesc->app_resident_data_offset;
+
+ desc->ctx_dma = FALCON_DMAIDX_UCODE;
+ desc->code_dma_base = u64_to_flcn64(addr_code);
+ desc->non_sec_code_off = pdesc->app_resident_code_offset;
+ desc->non_sec_code_size = pdesc->app_resident_code_size;
+ desc->code_entry_point = pdesc->app_imem_entry;
+ desc->data_dma_base = u64_to_flcn64(addr_data);
+ desc->data_size = pdesc->app_resident_data_size;
+}
+
+const struct acr_r352_ls_func
+acr_r370_ls_fecs_func = {
+ .load = acr_ls_ucode_load_fecs,
+ .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+};
+
+const struct acr_r352_ls_func
+acr_r370_ls_gpccs_func = {
+ .load = acr_ls_ucode_load_gpccs,
+ .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+ /* GPCCS will be loaded using PRI */
+ .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+};
+
+static void
+acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
+ const struct ls_ucode_img *img, u64 wpr_addr,
+ void *_desc)
+{
+ const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
+ const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
+ struct acr_r370_flcn_bl_desc *desc = _desc;
+ u64 base, addr_code, addr_data;
+ u32 addr_args;
+
+ base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
+ /* For some reason we should not add app_resident_code_offset here */
+ addr_code = base;
+ addr_data = base + pdesc->app_resident_data_offset;
+ addr_args = sec->falcon->data.limit;
+ addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
+
+ desc->ctx_dma = FALCON_SEC2_DMAIDX_UCODE;
+ desc->code_dma_base = u64_to_flcn64(addr_code);
+ desc->non_sec_code_off = pdesc->app_resident_code_offset;
+ desc->non_sec_code_size = pdesc->app_resident_code_size;
+ desc->code_entry_point = pdesc->app_imem_entry;
+ desc->data_dma_base = u64_to_flcn64(addr_data);
+ desc->data_size = pdesc->app_resident_data_size;
+ desc->argc = 1;
+ /* args are stored at the beginning of EMEM */
+ desc->argv = 0x01000000;
+}
+
+const struct acr_r352_ls_func
+acr_r370_ls_sec2_func = {
+ .load = acr_ls_ucode_load_sec2,
+ .generate_bl_desc = acr_r370_generate_sec2_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+ .post_run = acr_ls_sec2_post_run,
+};
+
+void
+acr_r370_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
+ u64 offset)
+{
+ struct acr_r370_flcn_bl_desc *bl_desc = _bl_desc;
+
+ bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
+ bl_desc->non_sec_code_off = hdr->non_sec_code_off;
+ bl_desc->non_sec_code_size = hdr->non_sec_code_size;
+ bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
+ bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
+ bl_desc->code_entry_point = 0;
+ bl_desc->code_dma_base = u64_to_flcn64(offset);
+ bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
+ bl_desc->data_size = hdr->data_size;
+}
+
+const struct acr_r352_func
+acr_r370_func = {
+ .fixup_hs_desc = acr_r367_fixup_hs_desc,
+ .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
+ .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+ .shadow_blob = true,
+ .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
+ .ls_fill_headers = acr_r367_ls_fill_headers,
+ .ls_write_wpr = acr_r367_ls_write_wpr,
+ .ls_func = {
+ [NVKM_SECBOOT_FALCON_SEC2] = &acr_r370_ls_sec2_func,
+ [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
+ [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
+ },
+};
+
+struct nvkm_acr *
+acr_r370_new(enum nvkm_secboot_falcon boot_falcon,
+ unsigned long managed_falcons)
+{
+ return acr_r352_new_(&acr_r370_func, boot_falcon, managed_falcons);
+}
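acr_r370.c above leans on u64_to_flcn64() to place 64-bit WPR addresses into the descriptor. The priv.h hunk later in this patch shows struct flcn_u64 as a lo/hi pair of u32s, so the conversion presumably amounts to a split into low and high words; a minimal sketch assuming that layout (the helper body itself is not shown in this diff):

#include <stdint.h>

struct flcn_u64 {
	uint32_t lo;
	uint32_t hi;
};

/* assumed implementation: split a 64-bit address into the two words
 * the falcon bootloader descriptor expects */
static inline struct flcn_u64 u64_to_flcn64(uint64_t u)
{
	struct flcn_u64 ret = {
		.lo = (uint32_t)u,
		.hi = (uint32_t)(u >> 32),
	};
	return ret;
}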
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
new file mode 100644
index 000000000000..3426f86a15e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_SECBOOT_ACR_R370_H__
+#define __NVKM_SECBOOT_ACR_R370_H__
+
+#include "priv.h"
+struct hsf_load_header;
+
+/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
+struct acr_r370_flcn_bl_desc {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ struct flcn_u64 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ struct flcn_u64 data_dma_base;
+ u32 data_size;
+ u32 argc;
+ u32 argv;
+};
+
+void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
+extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
+extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
+#endif
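Because bl_desc_size is handed to generic code that copies this descriptor into falcon DMEM verbatim, the layout must match what the signed bootloader expects. Every field is a 4-byte quantity (flcn_u64 being two u32s), so the structure packs to 84 bytes with no padding; a hypothetical compile-time guard, with the figure computed by hand from the fields above rather than taken from the source:

/* 4*4 (reserved) + 4*4 (signature) + 4 (ctx_dma) + 8 (code_dma_base)
 * + 4+4 (non_sec off/size) + 4+4 (sec off/size) + 4 (entry point)
 * + 8 (data_dma_base) + 4 (data_size) + 4 (argc) + 4 (argv) = 84 */
_Static_assert(sizeof(struct acr_r370_flcn_bl_desc) == 84,
	       "bl desc layout drifted from the firmware ABI");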
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
index ddb795bb007b..7bdef93cb7ae 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
@@ -20,90 +20,12 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "acr_r370.h"
#include "acr_r367.h"
-#include <engine/falcon.h>
#include <core/msgqueue.h>
#include <subdev/pmu.h>
-/*
- * r375 ACR: similar to r367, but with a unified bootloader descriptor
- * structure for GR and PMU falcons.
- */
-
-/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
-struct acr_r375_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- struct flcn_u64 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
- u32 argc;
- u32 argv;
-};
-
-static void
-acr_r375_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *img, u64 wpr_addr,
- void *_desc)
-{
- struct acr_r375_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
- u64 base, addr_code, addr_data;
-
- base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
- addr_code = base + pdesc->app_resident_code_offset;
- addr_data = base + pdesc->app_resident_data_offset;
-
- desc->ctx_dma = FALCON_DMAIDX_UCODE;
- desc->code_dma_base = u64_to_flcn64(addr_code);
- desc->non_sec_code_off = pdesc->app_resident_code_offset;
- desc->non_sec_code_size = pdesc->app_resident_code_size;
- desc->code_entry_point = pdesc->app_imem_entry;
- desc->data_dma_base = u64_to_flcn64(addr_data);
- desc->data_size = pdesc->app_resident_data_size;
-}
-
-static void
-acr_r375_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
- u64 offset)
-{
- struct acr_r375_flcn_bl_desc *bl_desc = _bl_desc;
-
- bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
- bl_desc->non_sec_code_off = hdr->non_sec_code_off;
- bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
- bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
- bl_desc->code_entry_point = 0;
- bl_desc->code_dma_base = u64_to_flcn64(offset);
- bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
- bl_desc->data_size = hdr->data_size;
-}
-
-const struct acr_r352_ls_func
-acr_r375_ls_fecs_func = {
- .load = acr_ls_ucode_load_fecs,
- .generate_bl_desc = acr_r375_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
-};
-
-const struct acr_r352_ls_func
-acr_r375_ls_gpccs_func = {
- .load = acr_ls_ucode_load_gpccs,
- .generate_bl_desc = acr_r375_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
-};
-
-
static void
acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
const struct ls_ucode_img *img, u64 wpr_addr,
@@ -111,7 +33,7 @@ acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
{
const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
- struct acr_r375_flcn_bl_desc *desc = _desc;
+ struct acr_r370_flcn_bl_desc *desc = _desc;
u64 base, addr_code, addr_data;
u32 addr_args;
@@ -136,23 +58,22 @@ const struct acr_r352_ls_func
acr_r375_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
.generate_bl_desc = acr_r375_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.post_run = acr_ls_pmu_post_run,
};
-
const struct acr_r352_func
acr_r375_func = {
.fixup_hs_desc = acr_r367_fixup_hs_desc,
- .generate_hs_bl_desc = acr_r375_generate_hs_bl_desc,
- .hs_bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
+ .generate_hs_bl_desc = acr_r370_generate_hs_bl_desc,
+ .hs_bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.shadow_blob = true,
.ls_ucode_img_load = acr_r367_ls_ucode_img_load,
.ls_fill_headers = acr_r367_ls_fill_headers,
.ls_write_wpr = acr_r367_ls_write_wpr,
.ls_func = {
- [NVKM_SECBOOT_FALCON_FECS] = &acr_r375_ls_fecs_func,
- [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r375_ls_gpccs_func,
+ [NVKM_SECBOOT_FALCON_FECS] = &acr_r370_ls_fecs_func,
+ [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r370_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
index f3b3c66349d2..1f7a3c1a7f50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
@@ -133,7 +133,7 @@ gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
return gm200_secboot_run_blob(sb, blob, falcon);
}
-static const struct nvkm_secboot_func
+const struct nvkm_secboot_func
gp102_secboot = {
.dtor = gm200_secboot_dtor,
.oneinit = gm200_secboot_oneinit,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
new file mode 100644
index 000000000000..e8c27ec700de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp108.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gm200.h"
+#include "acr.h"
+
+int
+gp108_secboot_new(struct nvkm_device *device, int index,
+ struct nvkm_secboot **psb)
+{
+ struct gm200_secboot *gsb;
+ struct nvkm_acr *acr;
+
+ acr = acr_r370_new(NVKM_SECBOOT_FALCON_SEC2,
+ BIT(NVKM_SECBOOT_FALCON_FECS) |
+ BIT(NVKM_SECBOOT_FALCON_GPCCS) |
+ BIT(NVKM_SECBOOT_FALCON_SEC2));
+ if (IS_ERR(acr))
+ return PTR_ERR(acr);
+
+ if (!(gsb = kzalloc(sizeof(*gsb), GFP_KERNEL))) {
+ acr->func->dtor(acr);
+ return -ENOMEM;
+ }
+ *psb = &gsb->base;
+
+ return nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
+}
+
+MODULE_FIRMWARE("nvidia/gp108/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gp108/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp108/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gp108/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index d9091f029506..959a7b2dbdc9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -40,6 +40,8 @@ int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
int nvkm_secboot_falcon_run(struct nvkm_secboot *);
+extern const struct nvkm_secboot_func gp102_secboot;
+
struct flcn_u64 {
u32 lo;
u32 hi;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
index 7ba56b12badd..550702eab0b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/Kbuild
@@ -9,7 +9,9 @@ nvkm-y += nvkm/subdev/therm/nv40.o
nvkm-y += nvkm/subdev/therm/nv50.o
nvkm-y += nvkm/subdev/therm/g84.o
nvkm-y += nvkm/subdev/therm/gt215.o
+nvkm-y += nvkm/subdev/therm/gf100.o
nvkm-y += nvkm/subdev/therm/gf119.o
+nvkm-y += nvkm/subdev/therm/gk104.o
nvkm-y += nvkm/subdev/therm/gm107.o
nvkm-y += nvkm/subdev/therm/gm200.o
nvkm-y += nvkm/subdev/therm/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index f27fc6d0d4c6..bf62303571b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -21,6 +21,7 @@
*
* Authors: Martin Peres
*/
+#include <nvkm/core/option.h>
#include "priv.h"
int
@@ -297,6 +298,38 @@ nvkm_therm_attr_set(struct nvkm_therm *therm,
return -EINVAL;
}
+void
+nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
+{
+ if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
+ return;
+
+ nvkm_debug(&therm->subdev,
+ "Enabling clockgating\n");
+ therm->func->clkgate_enable(therm);
+}
+
+void
+nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)
+{
+ if (!therm->func->clkgate_fini || !therm->clkgating_enabled)
+ return;
+
+ nvkm_debug(&therm->subdev,
+ "Preparing clockgating for %s\n",
+ suspend ? "suspend" : "fini");
+ therm->func->clkgate_fini(therm, suspend);
+}
+
+static void
+nvkm_therm_clkgate_oneinit(struct nvkm_therm *therm)
+{
+ if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
+ return;
+
+ nvkm_info(&therm->subdev, "Clockgating enabled\n");
+}
+
static void
nvkm_therm_intr(struct nvkm_subdev *subdev)
{
@@ -333,6 +366,7 @@ nvkm_therm_oneinit(struct nvkm_subdev *subdev)
nvkm_therm_fan_ctor(therm);
nvkm_therm_fan_mode(therm, NVKM_THERM_CTRL_AUTO);
nvkm_therm_sensor_preinit(therm);
+ nvkm_therm_clkgate_oneinit(therm);
return 0;
}
@@ -357,6 +391,16 @@ nvkm_therm_init(struct nvkm_subdev *subdev)
return 0;
}
+void
+nvkm_therm_clkgate_init(struct nvkm_therm *therm,
+ const struct nvkm_therm_clkgate_pack *p)
+{
+ if (!therm->func->clkgate_init || !therm->clkgating_enabled)
+ return;
+
+ therm->func->clkgate_init(therm, p);
+}
+
static void *
nvkm_therm_dtor(struct nvkm_subdev *subdev)
{
@@ -374,15 +418,10 @@ nvkm_therm = {
.intr = nvkm_therm_intr,
};
-int
-nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
- int index, struct nvkm_therm **ptherm)
+void
+nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
+ int index, const struct nvkm_therm_func *func)
{
- struct nvkm_therm *therm;
-
- if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
- return -ENOMEM;
-
nvkm_subdev_ctor(&nvkm_therm, device, index, &therm->subdev);
therm->func = func;
@@ -395,5 +434,20 @@ nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
therm->attr_get = nvkm_therm_attr_get;
therm->attr_set = nvkm_therm_attr_set;
therm->mode = therm->suspend = -1; /* undefined */
+
+ therm->clkgating_enabled = nvkm_boolopt(device->cfgopt,
+ "NvPmEnableGating", false);
+}
+
+int
+nvkm_therm_new_(const struct nvkm_therm_func *func, struct nvkm_device *device,
+ int index, struct nvkm_therm **ptherm)
+{
+ struct nvkm_therm *therm;
+
+ if (!(therm = *ptherm = kzalloc(sizeof(*therm), GFP_KERNEL)))
+ return -ENOMEM;
+
+ nvkm_therm_ctor(therm, device, index, func);
return 0;
}
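The point of splitting nvkm_therm_ctor() out of nvkm_therm_new_() is that a chip-specific implementation can now embed struct nvkm_therm in a larger object and construct the base in place, rather than being limited to the bare allocation done here; the gk104 code later in this patch is the first user. The shape of the pattern, with my_therm as a stand-in name:

struct my_therm {
	struct nvkm_therm base;
	int chip_specific_state;	/* illustrative extra field */
};

static int
my_therm_new(const struct nvkm_therm_func *func, struct nvkm_device *device,
	     int index, struct nvkm_therm **ptherm)
{
	struct my_therm *therm = kzalloc(sizeof(*therm), GFP_KERNEL);

	if (!therm)
		return -ENOMEM;

	nvkm_therm_ctor(&therm->base, device, index, func);
	*ptherm = &therm->base;
	return 0;
}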
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c
new file mode 100644
index 000000000000..5ae6913320e8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul
+ */
+#include <core/device.h>
+
+#include "priv.h"
+
+#define pack_for_each_init(init, pack, head) \
+ for (pack = head; pack && pack->init; pack++) \
+ for (init = pack->init; init && init->count; init++)
+void
+gf100_clkgate_init(struct nvkm_therm *therm,
+ const struct nvkm_therm_clkgate_pack *p)
+{
+ struct nvkm_device *device = therm->subdev.device;
+ const struct nvkm_therm_clkgate_pack *pack;
+ const struct nvkm_therm_clkgate_init *init;
+ u32 next, addr;
+
+ pack_for_each_init(init, pack, p) {
+ next = init->addr + init->count * 8;
+ addr = init->addr;
+
+ nvkm_trace(&therm->subdev, "{ 0x%06x, %d, 0x%08x }\n",
+ init->addr, init->count, init->data);
+ while (addr < next) {
+ nvkm_trace(&therm->subdev, "\t0x%06x = 0x%08x\n",
+ addr, init->data);
+ nvkm_wr32(device, addr, init->data);
+ addr += 8;
+ }
+ }
+}
+
+/*
+ * TODO: Fermi clockgating isn't understood fully yet, so we don't specify any
+ * clockgate functions to use
+ */
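gf100_clkgate_init consumes an array of packs, each pointing at an array of { addr, count, data } entries, and replays data into count registers starting at addr with an 8-byte stride. A hypothetical pack, with the field order inferred from the loop above (the header defining these structs is not part of this diff) and made-up register values:

/* writes 0x00004544 into eight registers, 0x020200 through 0x020238 */
static const struct nvkm_therm_clkgate_init example_clkgate_init[] = {
	{ 0x020200, 8, 0x00004544 },
	{}	/* count == 0 ends the inner loop */
};

static const struct nvkm_therm_clkgate_pack example_clkgate_pack[] = {
	{ example_clkgate_init },
	{}	/* init == NULL ends the outer loop */
};

/* a chip's init path would then hand this to the wrapper in base.c: */
nvkm_therm_clkgate_init(therm, example_clkgate_pack);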
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.h
new file mode 100644
index 000000000000..cfb25af77c60
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf100.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul
+ */
+
+#ifndef __GF100_THERM_H__
+#define __GF100_THERM_H__
+
+#include <core/device.h>
+
+struct gf100_idle_filter {
+ u32 fecs;
+ u32 hubmmu;
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
index 06dcfd6ee966..0981b02790e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gf119.c
@@ -49,7 +49,7 @@ pwm_info(struct nvkm_therm *therm, int line)
return -ENODEV;
}
-static int
+int
gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
{
struct nvkm_device *device = therm->subdev.device;
@@ -63,7 +63,7 @@ gf119_fan_pwm_ctrl(struct nvkm_therm *therm, int line, bool enable)
return 0;
}
-static int
+int
gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
{
struct nvkm_device *device = therm->subdev.device;
@@ -85,7 +85,7 @@ gf119_fan_pwm_get(struct nvkm_therm *therm, int line, u32 *divs, u32 *duty)
return -EINVAL;
}
-static int
+int
gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
{
struct nvkm_device *device = therm->subdev.device;
@@ -102,7 +102,7 @@ gf119_fan_pwm_set(struct nvkm_therm *therm, int line, u32 divs, u32 duty)
return 0;
}
-static int
+int
gf119_fan_pwm_clock(struct nvkm_therm *therm, int line)
{
struct nvkm_device *device = therm->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
new file mode 100644
index 000000000000..4e03971d2e3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul
+ */
+#include <core/device.h>
+
+#include "priv.h"
+#include "gk104.h"
+
+void
+gk104_clkgate_enable(struct nvkm_therm *base)
+{
+ struct gk104_therm *therm = gk104_therm(base);
+ struct nvkm_device *dev = therm->base.subdev.device;
+ const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
+ int i;
+
+ /* Program ENG_MANT, ENG_FILTER */
+ for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
+ if (!nvkm_device_subdev(dev, order[i].engine))
+ continue;
+
+ nvkm_mask(dev, 0x20200 + order[i].offset, 0xff00, 0x4500);
+ }
+
+ /* magic */
+ nvkm_wr32(dev, 0x020288, therm->idle_filter->fecs);
+ nvkm_wr32(dev, 0x02028c, therm->idle_filter->hubmmu);
+
+ /* Enable clockgating (ENG_CLK = RUN->AUTO) */
+ for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
+ if (!nvkm_device_subdev(dev, order[i].engine))
+ continue;
+
+ nvkm_mask(dev, 0x20200 + order[i].offset, 0x00ff, 0x0045);
+ }
+}
+
+void
+gk104_clkgate_fini(struct nvkm_therm *base, bool suspend)
+{
+ struct gk104_therm *therm = gk104_therm(base);
+ struct nvkm_device *dev = therm->base.subdev.device;
+ const struct gk104_clkgate_engine_info *order = therm->clkgate_order;
+ int i;
+
+ /* ENG_CLK = AUTO->RUN, ENG_PWR = RUN->AUTO */
+ for (i = 0; order[i].engine != NVKM_SUBDEV_NR; i++) {
+ if (!nvkm_device_subdev(dev, order[i].engine))
+ continue;
+
+ nvkm_mask(dev, 0x20200 + order[i].offset, 0xff, 0x54);
+ }
+}
+
+const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[] = {
+ { NVKM_ENGINE_GR, 0x00 },
+ { NVKM_ENGINE_MSPDEC, 0x04 },
+ { NVKM_ENGINE_MSPPP, 0x08 },
+ { NVKM_ENGINE_MSVLD, 0x0c },
+ { NVKM_ENGINE_CE0, 0x10 },
+ { NVKM_ENGINE_CE1, 0x14 },
+ { NVKM_ENGINE_MSENC, 0x18 },
+ { NVKM_ENGINE_CE2, 0x1c },
+ { NVKM_SUBDEV_NR, 0 },
+};
+
+const struct gf100_idle_filter gk104_idle_filter = {
+ .fecs = 0x00001000,
+ .hubmmu = 0x00001000,
+};
+
+static const struct nvkm_therm_func
+gk104_therm_func = {
+ .init = gf119_therm_init,
+ .fini = g84_therm_fini,
+ .pwm_ctrl = gf119_fan_pwm_ctrl,
+ .pwm_get = gf119_fan_pwm_get,
+ .pwm_set = gf119_fan_pwm_set,
+ .pwm_clock = gf119_fan_pwm_clock,
+ .temp_get = g84_temp_get,
+ .fan_sense = gt215_therm_fan_sense,
+ .program_alarms = nvkm_therm_program_alarms_polling,
+ .clkgate_init = gf100_clkgate_init,
+ .clkgate_enable = gk104_clkgate_enable,
+ .clkgate_fini = gk104_clkgate_fini,
+};
+
+static int
+gk104_therm_new_(const struct nvkm_therm_func *func,
+ struct nvkm_device *device,
+ int index,
+ const struct gk104_clkgate_engine_info *clkgate_order,
+ const struct gf100_idle_filter *idle_filter,
+ struct nvkm_therm **ptherm)
+{
+ struct gk104_therm *therm = kzalloc(sizeof(*therm), GFP_KERNEL);
+
+ if (!therm)
+ return -ENOMEM;
+
+ nvkm_therm_ctor(&therm->base, device, index, func);
+ *ptherm = &therm->base;
+ therm->clkgate_order = clkgate_order;
+ therm->idle_filter = idle_filter;
+
+ return 0;
+}
+
+int
+gk104_therm_new(struct nvkm_device *device,
+ int index, struct nvkm_therm **ptherm)
+{
+ return gk104_therm_new_(&gk104_therm_func, device, index,
+ gk104_clkgate_engine_info, &gk104_idle_filter,
+ ptherm);
+}
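Both loops in gk104_clkgate_enable use nvkm_mask(), which is a read-modify-write: the first pass clears bits 15:8 of each engine's 0x20200-range register and writes 0x45 there (the ENG_MANT/ENG_FILTER fields, per the comment), the second does the same to bits 7:0 to flip ENG_CLK from RUN to AUTO. One such update, open-coded for clarity:

/* equivalent of nvkm_mask(dev, 0x20200 + off, 0xff00, 0x4500) */
u32 tmp = nvkm_rd32(dev, 0x20200 + off);

tmp &= ~0xff00;		/* clear the old field value */
tmp |= 0x4500;		/* program the new one */
nvkm_wr32(dev, 0x20200 + off, tmp);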
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
new file mode 100644
index 000000000000..293e7743b19b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gk104.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Lyude Paul
+ */
+
+#ifndef __GK104_THERM_H__
+#define __GK104_THERM_H__
+#define gk104_therm(p) (container_of((p), struct gk104_therm, base))
+
+#include <subdev/therm.h>
+#include "priv.h"
+#include "gf100.h"
+
+struct gk104_clkgate_engine_info {
+ enum nvkm_devidx engine;
+ u8 offset;
+};
+
+struct gk104_therm {
+ struct nvkm_therm base;
+
+ const struct gk104_clkgate_engine_info *clkgate_order;
+ const struct gf100_idle_filter *idle_filter;
+};
+
+extern const struct gk104_clkgate_engine_info gk104_clkgate_engine_info[];
+extern const struct gf100_idle_filter gk104_idle_filter;
+
+#endif
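
The gk104_therm(p) macro above is the usual container_of downcast: given a
pointer to the embedded nvkm_therm it recovers the wrapping gk104_therm. A
compilable sketch of the idiom, with container_of reproduced in simplified
form (the kernel version adds type checking):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int index; };

struct derived {
	struct base base;	/* embedded, so the cast below is legal */
	int extra;
};

#define to_derived(p) container_of((p), struct derived, base)

int main(void)
{
	struct derived d = { .base = { .index = 3 }, .extra = 42 };
	struct base *b = &d.base;	/* what core code hands back */

	printf("extra=%d\n", to_derived(b)->extra);
	return 0;
}
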
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
index c08097f2aff5..4caf401d001a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/gt215.c
@@ -36,7 +36,7 @@ gt215_therm_fan_sense(struct nvkm_therm *therm)
return -ENODEV;
}
-static void
+void
gt215_therm_init(struct nvkm_therm *therm)
{
struct nvkm_device *device = therm->subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
index 1f46e371d7c4..21659daf1864 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/priv.h
@@ -32,6 +32,8 @@
int nvkm_therm_new_(const struct nvkm_therm_func *, struct nvkm_device *,
int index, struct nvkm_therm **);
+void nvkm_therm_ctor(struct nvkm_therm *therm, struct nvkm_device *device,
+ int index, const struct nvkm_therm_func *func);
struct nvkm_fan {
struct nvkm_therm *parent;
@@ -66,8 +68,6 @@ int nvkm_therm_fan_set(struct nvkm_therm *, bool now, int percent);
int nvkm_therm_fan_user_get(struct nvkm_therm *);
int nvkm_therm_fan_user_set(struct nvkm_therm *, int percent);
-int nvkm_therm_preinit(struct nvkm_therm *);
-
int nvkm_therm_sensor_init(struct nvkm_therm *);
int nvkm_therm_sensor_fini(struct nvkm_therm *, bool suspend);
void nvkm_therm_sensor_preinit(struct nvkm_therm *);
@@ -96,6 +96,11 @@ struct nvkm_therm_func {
int (*fan_sense)(struct nvkm_therm *);
void (*program_alarms)(struct nvkm_therm *);
+
+ void (*clkgate_init)(struct nvkm_therm *,
+ const struct nvkm_therm_clkgate_pack *);
+ void (*clkgate_enable)(struct nvkm_therm *);
+ void (*clkgate_fini)(struct nvkm_therm *, bool);
};
void nv40_therm_intr(struct nvkm_therm *);
@@ -111,9 +116,21 @@ void g84_therm_fini(struct nvkm_therm *);
int gt215_therm_fan_sense(struct nvkm_therm *);
+void gf100_clkgate_init(struct nvkm_therm *,
+ const struct nvkm_therm_clkgate_pack *);
+
void g84_therm_init(struct nvkm_therm *);
+
+int gf119_fan_pwm_ctrl(struct nvkm_therm *, int, bool);
+int gf119_fan_pwm_get(struct nvkm_therm *, int, u32 *, u32 *);
+int gf119_fan_pwm_set(struct nvkm_therm *, int, u32, u32);
+int gf119_fan_pwm_clock(struct nvkm_therm *, int);
void gf119_therm_init(struct nvkm_therm *);
+void gk104_therm_init(struct nvkm_therm *);
+void gk104_clkgate_enable(struct nvkm_therm *);
+void gk104_clkgate_fini(struct nvkm_therm *, bool);
+
int nvkm_fanpwm_create(struct nvkm_therm *, struct dcb_gpio_func *);
int nvkm_fantog_create(struct nvkm_therm *, struct dcb_gpio_func *);
int nvkm_fannil_create(struct nvkm_therm *);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index d34d1cf33895..95f4db70dd22 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -995,7 +995,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
/* calc dclk divider with current vco freq */
dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
pd_min, pd_even);
- if (vclk_div > pd_max)
+ if (dclk_div > pd_max)
break; /* vco is too big, it has to stop */
/* calc score with current vco freq */
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 893003fc76a1..2fef09a56d16 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1727,7 +1727,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
kref_get(&bo->list_kref);
if (!list_empty(&bo->ddestroy)) {
- ret = ttm_bo_cleanup_refs(bo, false, false, true);
+ ret = ttm_bo_cleanup_refs(bo, false, false, locked);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 08a3c324242e..60fcef1593dd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -316,7 +316,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
unsigned long offset,
- void *buf, int len, int write)
+ uint8_t *buf, int len, int write)
{
unsigned long page = offset >> PAGE_SHIFT;
unsigned long bytes_left = len;
@@ -345,6 +345,7 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
ttm_bo_kunmap(&map);
page++;
+ buf += bytes;
bytes_left -= bytes;
offset = 0;
} while (bytes_left);
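
The ttm_bo_vm_access_kmap() hunk fixes two related problems: the buffer is
now typed so byte arithmetic on it is well-defined, and the pointer advances
after each chunk so later pages land at later offsets instead of overwriting
the start. A sketch of the corrected chunked-copy shape; copy_chunked() is a
hypothetical helper, not the TTM API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void copy_chunked(uint8_t *dst, const uint8_t *src,
			 size_t len, size_t chunk)
{
	while (len) {
		size_t n = len < chunk ? len : chunk;

		memcpy(dst, src, n);
		dst += n;	/* the advance the fix above restores */
		src += n;
		len -= n;
	}
}

int main(void)
{
	uint8_t out[16];

	copy_chunked(out, (const uint8_t *)"0123456789abcdef", 16, 4);
	printf("%.16s\n", out);
	return 0;
}
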
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index fba4ec168bd5..74788fdeb773 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -617,7 +617,9 @@ retry:
address, flags);
break;
default:
- printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
+ printk(KERN_ERR "UNKNOWN type=0x%02x event[0]=0x%08x "
+ "event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
+ type, event[0], event[1], event[2], event[3]);
}
memset(__evt, 0, 4 * sizeof(u32));
@@ -1816,7 +1818,8 @@ static bool dma_ops_domain(struct protection_domain *domain)
return domain->flags & PD_DMA_OPS_MASK;
}
-static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
+static void set_dte_entry(u16 devid, struct protection_domain *domain,
+ bool ats, bool ppr)
{
u64 pte_root = 0;
u64 flags = 0;
@@ -1833,6 +1836,13 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
if (ats)
flags |= DTE_FLAG_IOTLB;
+ if (ppr) {
+ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+ if (iommu_feature(iommu, FEATURE_EPHSUP))
+ pte_root |= 1ULL << DEV_ENTRY_PPR;
+ }
+
if (domain->flags & PD_IOMMUV2_MASK) {
u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
u64 glx = domain->glx;
@@ -1895,9 +1905,9 @@ static void do_attach(struct iommu_dev_data *dev_data,
domain->dev_cnt += 1;
/* Update device table */
- set_dte_entry(dev_data->devid, domain, ats);
+ set_dte_entry(dev_data->devid, domain, ats, dev_data->iommu_v2);
if (alias != dev_data->devid)
- set_dte_entry(alias, domain, ats);
+ set_dte_entry(alias, domain, ats, dev_data->iommu_v2);
device_flush_dte(dev_data);
}
@@ -2276,13 +2286,15 @@ static void update_device_table(struct protection_domain *domain)
struct iommu_dev_data *dev_data;
list_for_each_entry(dev_data, &domain->dev_list, list) {
- set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
+ set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled,
+ dev_data->iommu_v2);
if (dev_data->devid == dev_data->alias)
continue;
/* There is an alias, update device table entry for it */
- set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled);
+ set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled,
+ dev_data->iommu_v2);
}
}
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index f6b24c7d8b70..6a877ebd058b 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -98,6 +98,7 @@
#define FEATURE_HE (1ULL<<8)
#define FEATURE_PC (1ULL<<9)
#define FEATURE_GAM_VAPIC (1ULL<<21)
+#define FEATURE_EPHSUP (1ULL<<50)
#define FEATURE_PASID_SHIFT 32
#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)
@@ -192,6 +193,7 @@
/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID 0x00
#define DEV_ENTRY_TRANSLATION 0x01
+#define DEV_ENTRY_PPR 0x34
#define DEV_ENTRY_IR 0x3d
#define DEV_ENTRY_IW 0x3e
#define DEV_ENTRY_NO_PAGE_FAULT 0x62
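
DEV_ENTRY_PPR is bit index 0x34 (decimal 52), which is why the amd_iommu.c
hunk sets it with 1ULL: shifting a plain int constant by 52 would be
undefined. A two-line demonstration:

#include <stdint.h>
#include <stdio.h>

#define DEV_ENTRY_PPR 0x34	/* from the header above */

int main(void)
{
	uint64_t pte_root = 0;

	pte_root |= 1ULL << DEV_ENTRY_PPR;	/* 1 << 52 would overflow int */
	printf("pte_root = 0x%016llx\n", (unsigned long long)pte_root);
	return 0;
}
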
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 744592d330ca..3f2f1fc68b52 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -2971,7 +2971,7 @@ static struct platform_driver arm_smmu_driver = {
};
module_platform_driver(arm_smmu_driver);
-IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", NULL);
+IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3");
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 78d4c6b8f1ba..69e7c60792a8 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -2211,12 +2211,12 @@ static struct platform_driver arm_smmu_driver = {
};
module_platform_driver(arm_smmu_driver);
-IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL);
-IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL);
-IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL);
-IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL);
-IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL);
-IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL);
+IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1");
+IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2");
+IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400");
+IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401");
+IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500");
+IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2");
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 79c45650f8de..2138102ef611 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1353,8 +1353,15 @@ static const struct iommu_ops exynos_iommu_ops = {
static int __init exynos_iommu_init(void)
{
+ struct device_node *np;
int ret;
+ np = of_find_matching_node(NULL, sysmmu_of_match);
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
if (!lv2table_kmem_cache) {
@@ -1394,4 +1401,4 @@ err_reg_driver:
}
core_initcall(exynos_iommu_init);
-IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", NULL);
+IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu");
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a1373cf34326..582fd01cb7d1 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -64,7 +64,7 @@
#define IOAPIC_RANGE_END (0xfeefffff)
#define IOVA_START_ADDR (0x1000)
-#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
+#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
#define MAX_AGAW_WIDTH 64
#define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
@@ -1601,8 +1601,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
* flush. However, device IOTLB doesn't need to be flushed in this case.
*/
if (!cap_caching_mode(iommu->cap) || !map)
- iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
- addr, mask);
+ iommu_flush_dev_iotlb(domain, addr, mask);
}
static void iommu_flush_iova(struct iova_domain *iovad)
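
The DEFAULT_DOMAIN_ADDRESS_WIDTH bump from 48 to 57 tracks 5-level paging:
each x86 page-table level resolves 9 bits on top of the 12-bit page offset,
so the usable width is 12 + 9 * levels. A quick check:

#include <stdio.h>

int main(void)
{
	for (int levels = 4; levels <= 5; levels++)
		printf("%d levels -> %d-bit address width\n",
		       levels, 12 + 9 * levels);	/* 48, then 57 */
	return 0;
}
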
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 0a826eb7fe48..35a408d0ae4f 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -26,6 +26,10 @@
#include <linux/interrupt.h>
#include <asm/page.h>
+#define PASID_ENTRY_P BIT_ULL(0)
+#define PASID_ENTRY_FLPM_5LP BIT_ULL(9)
+#define PASID_ENTRY_SRE BIT_ULL(11)
+
static irqreturn_t prq_event_thread(int irq, void *d);
struct pasid_entry {
@@ -41,6 +45,14 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
struct page *pages;
int order;
+ if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
+ !cap_fl1gp_support(iommu->cap))
+ return -EINVAL;
+
+ if (cpu_feature_enabled(X86_FEATURE_LA57) &&
+ !cap_5lp_support(iommu->cap))
+ return -EINVAL;
+
/* Start at 2 because it's defined as 2^(1+PSS) */
iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
@@ -129,6 +141,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
iommu->name);
dmar_free_hwirq(irq);
+ iommu->pr_irq = 0;
goto err;
}
dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
@@ -144,9 +157,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);
- free_irq(iommu->pr_irq, iommu);
- dmar_free_hwirq(iommu->pr_irq);
- iommu->pr_irq = 0;
+ if (iommu->pr_irq) {
+ free_irq(iommu->pr_irq, iommu);
+ dmar_free_hwirq(iommu->pr_irq);
+ iommu->pr_irq = 0;
+ }
free_pages((unsigned long)iommu->prq, PRQ_ORDER);
iommu->prq = NULL;
@@ -290,6 +305,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
struct mm_struct *mm = NULL;
+ u64 pasid_entry_val;
int pasid_max;
int ret;
@@ -396,9 +412,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
kfree(sdev);
goto out;
}
- iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
+ pasid_entry_val = (u64)__pa(mm->pgd) | PASID_ENTRY_P;
} else
- iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
+ pasid_entry_val = (u64)__pa(init_mm.pgd) |
+ PASID_ENTRY_P | PASID_ENTRY_SRE;
+ if (cpu_feature_enabled(X86_FEATURE_LA57))
+ pasid_entry_val |= PASID_ENTRY_FLPM_5LP;
+
+ iommu->pasid_table[svm->pasid].val = pasid_entry_val;
+
wmb();
/* In caching mode, we still have to flush with PASID 0 when
* a PASID table entry becomes present. Not entirely clear
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3de5c0bcb5cc..69fef991c651 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1303,6 +1303,9 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
int ret;
group = iommu_group_get(dev);
+ if (!group)
+ return -ENODEV;
+
/*
* Lock the group to make sure the device-count doesn't
* change while we are attaching
@@ -1341,6 +1344,8 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
struct iommu_group *group;
group = iommu_group_get(dev);
+ if (!group)
+ return;
mutex_lock(&group->mutex);
if (iommu_group_device_count(group) != 1) {
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 8dce3a9de9d8..40ae6e87cb88 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1108,18 +1108,8 @@ static void __exit ipmmu_exit(void)
subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);
-#ifdef CONFIG_IOMMU_DMA
-static int __init ipmmu_vmsa_iommu_of_setup(struct device_node *np)
-{
- ipmmu_init();
- return 0;
-}
-
-IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa",
- ipmmu_vmsa_iommu_of_setup);
-IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795",
- ipmmu_vmsa_iommu_of_setup);
-#endif
+IOMMU_OF_DECLARE(ipmmu_vmsa_iommu_of, "renesas,ipmmu-vmsa");
+IOMMU_OF_DECLARE(ipmmu_r8a7795_iommu_of, "renesas,ipmmu-r8a7795");
MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 04f4d51ffacb..0d3350463a3f 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -823,6 +823,8 @@ static int msm_iommu_probe(struct platform_device *pdev)
goto fail;
}
+ bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
+
pr_info("device mapped at %p, irq %d with %d ctx banks\n",
iommu->base, iommu->irq, iommu->ncb);
@@ -875,19 +877,7 @@ static void __exit msm_iommu_driver_exit(void)
subsys_initcall(msm_iommu_driver_init);
module_exit(msm_iommu_driver_exit);
-static int __init msm_iommu_init(void)
-{
- bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
- return 0;
-}
-
-static int __init msm_iommu_of_setup(struct device_node *np)
-{
- msm_iommu_init();
- return 0;
-}
-
-IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu", msm_iommu_of_setup);
+IOMMU_OF_DECLARE(msm_iommu_of, "qcom,apq8064-iommu");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 50947ebb6d17..5c36a8b7656a 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -231,19 +231,3 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
return ops;
}
-
-static int __init of_iommu_init(void)
-{
- struct device_node *np;
- const struct of_device_id *match, *matches = &__iommu_of_table;
-
- for_each_matching_node_and_match(np, matches, &match) {
- const of_iommu_init_fn init_fn = match->data;
-
- if (init_fn && init_fn(np))
- pr_err("Failed to initialise IOMMU %pOF\n", np);
- }
-
- return 0;
-}
-postcore_initcall_sync(of_iommu_init);
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 505548aafeff..50217548c3b8 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -274,8 +274,8 @@ void omap_iommu_debugfs_add(struct omap_iommu *obj)
if (!obj->debug_dir)
return;
- d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir,
- (u8 *)&obj->nr_tlb_entries);
+ d = debugfs_create_u32("nr_tlb_entries", 0400, obj->debug_dir,
+ &obj->nr_tlb_entries);
if (!d)
return;
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index e07f02d00c68..65b9c99707f8 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -947,7 +947,7 @@ static void __exit qcom_iommu_exit(void)
module_init(qcom_iommu_init);
module_exit(qcom_iommu_exit);
-IOMMU_OF_DECLARE(qcom_iommu_dev, "qcom,msm-iommu-v1", NULL);
+IOMMU_OF_DECLARE(qcom_iommu_dev, "qcom,msm-iommu-v1");
MODULE_DESCRIPTION("IOMMU API for QCOM IOMMU v1 implementations");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index a341938c7e2c..3633202e18f4 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -452,10 +452,12 @@ static irqreturn_t vop_virtio_intr_handler(int irq, void *data)
static void vop_virtio_release_dev(struct device *_d)
{
- /*
- * No need for a release method similar to virtio PCI.
- * Provide an empty one to avoid getting a warning from core.
- */
+ struct virtio_device *vdev =
+ container_of(_d, struct virtio_device, dev);
+ struct _vop_vdev *vop_vdev =
+ container_of(vdev, struct _vop_vdev, vdev);
+
+ kfree(vop_vdev);
}
/*
@@ -466,7 +468,7 @@ static int _vop_add_device(struct mic_device_desc __iomem *d,
unsigned int offset, struct vop_device *vpdev,
int dnode)
{
- struct _vop_vdev *vdev;
+ struct _vop_vdev *vdev, *reg_dev = NULL;
int ret;
u8 type = ioread8(&d->type);
@@ -497,6 +499,7 @@ static int _vop_add_device(struct mic_device_desc __iomem *d,
vdev->c2h_vdev_db = ioread8(&vdev->dc->c2h_vdev_db);
ret = register_virtio_device(&vdev->vdev);
+ reg_dev = vdev;
if (ret) {
dev_err(_vop_dev(vdev),
"Failed to register vop device %u type %u\n",
@@ -512,7 +515,10 @@ static int _vop_add_device(struct mic_device_desc __iomem *d,
free_irq:
vpdev->hw_ops->free_irq(vpdev, vdev->virtio_cookie, vdev);
kfree:
- kfree(vdev);
+ if (reg_dev)
+ put_device(&vdev->vdev.dev);
+ else
+ kfree(vdev);
return ret;
}
@@ -568,7 +574,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
iowrite8(-1, &dc->h2c_vdev_db);
if (status & VIRTIO_CONFIG_S_DRIVER_OK)
wait_for_completion(&vdev->reset_done);
- kfree(vdev);
+ put_device(&vdev->vdev.dev);
iowrite8(1, &dc->guest_ack);
dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
__func__, __LINE__, ioread8(&dc->guest_ack));
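
The vop changes replace kfree() with put_device() once
register_virtio_device() has run, because registration hands ownership of
the allocation to the device refcount and its ->release() callback. A
minimal userspace sketch of that ownership rule; the refcounting here is
simplified and is not the driver-core API:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
	void (*release)(struct obj *);
};

static void obj_release(struct obj *o)
{
	printf("release: freeing object\n");
	free(o);	/* only the final put may free */
}

static void get(struct obj *o) { o->refs++; }

static void put(struct obj *o)
{
	if (--o->refs == 0)
		o->release(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	o->refs = 1;		/* creator's reference */
	o->release = obj_release;
	get(o);			/* e.g. registration takes a reference */
	put(o);			/* error path drops it with put, not free */
	put(o);			/* last put runs ->release() */
	return 0;
}
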
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 0eae619419d9..620c2d90a646 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -874,7 +874,6 @@ config MMC_CQHCI
config MMC_TOSHIBA_PCI
tristate "Toshiba Type A SD/MMC Card Interface Driver"
depends on PCI
- help
config MMC_BCM2835
tristate "Broadcom BCM2835 SDHOST MMC Controller support"
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 7d1e4e2aaad0..ce1eed7a6d63 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -213,7 +213,7 @@ struct rx_tx_queue_stats {
struct q_desc_mem {
dma_addr_t dma;
u64 size;
- u16 q_len;
+ u32 q_len;
dma_addr_t phys_base;
void *base;
void *unalign_base;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 1ca2a39ed0f8..56bc626ef006 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -5166,7 +5166,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->regs = regs;
err = t4_wait_dev_ready(regs);
if (err < 0)
- goto out_unmap_bar0;
+ goto out_free_adapter;
/* We control everything through one PF */
whoami = readl(regs + PL_WHOAMI_A);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index afaf29b201dc..27447260215d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -354,6 +354,8 @@ static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
kfree(adapter->tx_stats_buffers);
kfree(adapter->rx_stats_buffers);
+ adapter->tx_stats_buffers = NULL;
+ adapter->rx_stats_buffers = NULL;
}
static int init_stats_buffers(struct ibmvnic_adapter *adapter)
@@ -599,6 +601,8 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
kfree(adapter->vpd->buff);
kfree(adapter->vpd);
+
+ adapter->vpd = NULL;
}
static void release_tx_pools(struct ibmvnic_adapter *adapter)
@@ -909,6 +913,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
dev_err(dev, "Could not map VPD buffer\n");
kfree(adapter->vpd->buff);
+ adapter->vpd->buff = NULL;
return -ENOMEM;
}
@@ -1414,10 +1419,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
hdrs += 2;
}
/* determine if l2/3/4 headers are sent to firmware */
- if ((*hdrs >> 7) & 1 &&
- (skb->protocol == htons(ETH_P_IP) ||
- skb->protocol == htons(ETH_P_IPV6) ||
- skb->protocol == htons(ETH_P_ARP))) {
+ if ((*hdrs >> 7) & 1) {
build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
tx_crq.v1.n_crq_elem = num_entries;
tx_buff->indir_arr[0] = tx_crq;
@@ -1639,6 +1641,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
return rc;
} else if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues) {
+ adapter->map_id = 1;
release_rx_pools(adapter);
release_tx_pools(adapter);
init_rx_pools(netdev);
@@ -1831,7 +1834,8 @@ restart_poll:
u16 offset;
u8 flags = 0;
- if (unlikely(adapter->resetting)) {
+ if (unlikely(adapter->resetting &&
+ adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
napi_complete_done(napi, frames_processed);
return frames_processed;
@@ -2908,8 +2912,12 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
cpu_to_be64(u64_crq[1]));
if (rc) {
- if (rc == H_CLOSED)
+ if (rc == H_CLOSED) {
dev_warn(dev, "CRQ Queue closed\n");
+ if (adapter->resetting)
+ ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ }
+
dev_warn(dev, "Send error (rc=%d)\n", rc);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index f95ce9b5e4fb..e31adbc75f9c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1785,7 +1785,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
u16 sections = 0;
u8 netdev_tc = 0;
- u16 numtc = 0;
+ u16 numtc = 1;
u16 qcount;
u8 offset;
u16 qmap;
@@ -1795,9 +1795,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
offset = 0;
+ /* Number of queues per enabled TC */
+ num_tc_qps = vsi->alloc_queue_pairs;
if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
/* Find numtc from enabled TC bitmap */
- for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (enabled_tc & BIT(i)) /* TC is enabled */
numtc++;
}
@@ -1805,18 +1807,13 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
numtc = 1;
}
- } else {
- /* At least TC0 is enabled in non-DCB, non-MQPRIO case */
- numtc = 1;
+ num_tc_qps = num_tc_qps / numtc;
+ num_tc_qps = min_t(int, num_tc_qps,
+ i40e_pf_get_max_q_per_tc(pf));
}
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
- /* Number of queues per enabled TC */
- qcount = vsi->alloc_queue_pairs;
-
- num_tc_qps = qcount / numtc;
- num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
/* Do not allow use more TC queue pairs than MSI-X vectors exist */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -1831,9 +1828,13 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
switch (vsi->type) {
case I40E_VSI_MAIN:
- qcount = min_t(int, pf->alloc_rss_size,
- num_tc_qps);
- break;
+ if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED)) ||
+ vsi->tc_config.enabled_tc != 1) {
+ qcount = min_t(int, pf->alloc_rss_size,
+ num_tc_qps);
+ break;
+ }
case I40E_VSI_FDIR:
case I40E_VSI_SRIOV:
case I40E_VSI_VMDQ2:
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 322027792fe8..34e98aa6b956 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -35,6 +35,7 @@
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
+#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
@@ -87,9 +88,20 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
+ struct nfp_pf *pf = app->pf;
struct nfp_bpf_vnic *bv;
int err;
+ if (!pf->eth_tbl) {
+ nfp_err(pf->cpp, "No ETH table\n");
+ return -EINVAL;
+ }
+ if (pf->max_data_vnics != pf->eth_tbl->count) {
+ nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
+ pf->max_data_vnics, pf->eth_tbl->count);
+ return -EINVAL;
+ }
+
bv = kzalloc(sizeof(*bv), GFP_KERNEL);
if (!bv)
return -ENOMEM;
@@ -170,6 +182,7 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
return err;
bv->tc_prog = cls_bpf->prog;
+ nn->port->tc_offload_cnt = !!bv->tc_prog;
return 0;
}
@@ -207,13 +220,6 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
}
}
-static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
-{
- struct nfp_bpf_vnic *bv = nn->app_priv;
-
- return !!bv->tc_prog;
-}
-
static int
nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
@@ -417,7 +423,6 @@ const struct nfp_app_type app_bpf = {
.ctrl_msg_rx = nfp_bpf_ctrl_msg_rx,
.setup_tc = nfp_bpf_setup_tc,
- .tc_busy = nfp_bpf_tc_busy,
.bpf = nfp_ndo_bpf,
.xdp_offload = nfp_bpf_xdp_offload,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 08c4c6dc5f7f..eb5c13dea8f5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -349,6 +349,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow, bool egress)
{
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
@@ -390,6 +391,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
INIT_HLIST_NODE(&flow_pay->link);
flow_pay->tc_flower_cookie = flow->cookie;
hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
+ port->tc_offload_cnt++;
/* Deallocate flow payload when flower rule has been destroyed. */
kfree(key_layer);
@@ -421,6 +423,7 @@ static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
struct tc_cls_flower_offload *flow)
{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_fl_payload *nfp_flow;
int err;
@@ -442,6 +445,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
err_free_flow:
hash_del_rcu(&nfp_flow->link);
+ port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
kfree(nfp_flow->mask_data);
kfree(nfp_flow->unmasked_data);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 437964afa8ee..20546ae67909 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -92,7 +92,6 @@ extern const struct nfp_app_type app_flower;
* @stop: stop application logic
* @ctrl_msg_rx: control message handler
* @setup_tc: setup TC ndo
- * @tc_busy: TC HW offload busy (rules loaded)
* @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program
* @eswitch_mode_get: get SR-IOV eswitch mode
@@ -135,7 +134,6 @@ struct nfp_app_type {
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
- bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
int (*bpf)(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *xdp);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
@@ -301,13 +299,6 @@ static inline bool nfp_app_has_tc(struct nfp_app *app)
return app && app->type->setup_tc;
}
-static inline bool nfp_app_tc_busy(struct nfp_app *app, struct nfp_net *nn)
-{
- if (!app || !app->type->tc_busy)
- return false;
- return app->type->tc_busy(app, nn);
-}
-
static inline int nfp_app_setup_tc(struct nfp_app *app,
struct net_device *netdev,
enum tc_setup_type type, void *type_data)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index 3f6952b66a49..1e597600c693 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -107,7 +107,7 @@ u16 immed_get_value(u64 instr)
if (!unreg_is_imm(reg))
reg = FIELD_GET(OP_IMMED_B_SRC, instr);
- return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr);
+ return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr) << 8;
}
void immed_set_value(u64 *instr, u16 immed)
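
The immed_get_value() fix restores the missing "<< 8" when reassembling an
immediate whose low byte lives in the register field and whose upper bits
live in a separate IMM field. An illustrative sketch; the field layout below
is invented for the example and is not the real NFP instruction encoding:

#include <stdint.h>
#include <stdio.h>

#define REG_FIELD(instr)	((uint16_t)((instr) & 0xff))
#define IMM_FIELD(instr)	((uint16_t)(((instr) >> 16) & 0xff))

static uint16_t immed_get(uint64_t instr)
{
	/* low byte from the register field, upper byte shifted into place */
	return REG_FIELD(instr) | (IMM_FIELD(instr) << 8);
}

int main(void)
{
	uint64_t instr = ((uint64_t)0x12 << 16) | 0x34;

	printf("immed = 0x%04x\n", immed_get(instr));	/* 0x1234 */
	return 0;
}
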
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index cc570bb6563c..ab301d56430b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -649,3 +649,4 @@ MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw");
MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver.");
+MODULE_VERSION(UTS_RELEASE);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index c0fd351c86b1..a05be0ab2713 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3210,10 +3210,9 @@ static int nfp_net_set_features(struct net_device *netdev,
new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
}
- if (changed & NETIF_F_HW_TC && nfp_app_tc_busy(nn->app, nn)) {
- nn_err(nn, "Cannot disable HW TC offload while in use\n");
- return -EBUSY;
- }
+ err = nfp_port_set_features(netdev, features);
+ if (err)
+ return err;
nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
netdev->features, features, changed);
@@ -3734,7 +3733,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->features = netdev->hw_features;
- if (nfp_app_has_tc(nn->app))
+ if (nfp_app_has_tc(nn->app) && nn->port)
netdev->hw_features |= NETIF_F_HW_TC;
/* Advertise but disable TSO by default. */
@@ -3751,6 +3750,8 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = nn->max_mtu;
+ netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
+
netif_carrier_off(netdev);
nfp_net_set_ethtool_ops(netdev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index eeecef2caac6..4499a7333078 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -59,9 +59,12 @@
#define NFP_NET_RX_OFFSET 32
/**
- * Maximum header size supported for LSO frames
+ * LSO parameters
+ * %NFP_NET_LSO_MAX_HDR_SZ: Maximum header size supported for LSO frames
+ * %NFP_NET_LSO_MAX_SEGS: Maximum number of segments an LSO frame can produce
*/
#define NFP_NET_LSO_MAX_HDR_SZ 255
+#define NFP_NET_LSO_MAX_SEGS 64
/**
* Prepend field types
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index f67da6bde9da..619570524d2a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -265,6 +265,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
.ndo_get_vf_config = nfp_app_get_vf_config,
.ndo_set_vf_link_state = nfp_app_set_vf_link_state,
+ .ndo_set_features = nfp_port_set_features,
};
static void nfp_repr_clean(struct nfp_repr *repr)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 34a6e035fe9a..7bd8be5c833b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -32,6 +32,7 @@
*/
#include <linux/lockdep.h>
+#include <linux/netdevice.h>
#include <net/switchdev.h>
#include "nfpcore/nfp_cpp.h"
@@ -100,6 +101,23 @@ int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type,
return nfp_app_setup_tc(port->app, netdev, type, type_data);
}
+int nfp_port_set_features(struct net_device *netdev, netdev_features_t features)
+{
+ struct nfp_port *port;
+
+ port = nfp_port_from_netdev(netdev);
+ if (!port)
+ return 0;
+
+ if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
+ port->tc_offload_cnt) {
+ netdev_err(netdev, "Cannot disable HW TC offload while offloads are active\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
struct nfp_port *
nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id)
{
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 21bd4aa32646..fa7e669a969c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -72,6 +72,8 @@ enum nfp_port_flags {
* @netdev: backpointer to associated netdev
* @type: what port type does the entity represent
* @flags: port flags
+ * @tc_offload_cnt: number of active TC offloads; the counting scheme is
+ * undefined, so treat the value as a boolean
* @app: backpointer to the app structure
* @dl_port: devlink port structure
* @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme
@@ -87,6 +89,7 @@ struct nfp_port {
enum nfp_port_type type;
unsigned long flags;
+ unsigned long tc_offload_cnt;
struct nfp_app *app;
@@ -121,6 +124,9 @@ static inline bool nfp_port_is_vnic(const struct nfp_port *port)
return port->type == NFP_PORT_PF_PORT || port->type == NFP_PORT_VF_PORT;
}
+int
+nfp_port_set_features(struct net_device *netdev, netdev_features_t features);
+
struct nfp_port *nfp_port_from_netdev(struct net_device *netdev);
struct nfp_port *
nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id);
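
nfp_port_set_features() detects "feature being switched off" with
(old & F) > (new & F); for a single-bit mask the comparison is true exactly
when the bit drops from set to clear. A standalone check of the idiom:

#include <stdio.h>

#define F_HW_TC (1u << 3)	/* stand-in for NETIF_F_HW_TC */

static int being_disabled(unsigned int old_f, unsigned int new_f,
			  unsigned int mask)
{
	return (old_f & mask) > (new_f & mask);
}

int main(void)
{
	printf("%d\n", being_disabled(F_HW_TC, 0, F_HW_TC));		/* 1 */
	printf("%d\n", being_disabled(0, F_HW_TC, F_HW_TC));		/* 0 */
	printf("%d\n", being_disabled(F_HW_TC, F_HW_TC, F_HW_TC));	/* 0 */
	return 0;
}
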
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 540d21786a43..ef10baf14186 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -74,8 +74,6 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
/* Mask GMAC interrupts */
value = GMAC_INT_DEFAULT_MASK;
- if (hw->pmt)
- value &= ~GMAC_INT_DISABLE_PMT;
if (hw->pcs)
value &= ~GMAC_INT_DISABLE_PCS;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 789dad8a07b5..7761a26ec9c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -98,7 +98,7 @@
#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
GMAC_INT_PCS_ANE)
-#define GMAC_INT_DEFAULT_MASK (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN)
+#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN)
enum dwmac4_irq_status {
time_stamp_irq = 0x00001000,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index ed222b20fcf1..63795ecafc8d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -61,10 +61,9 @@ static void dwmac4_core_init(struct mac_device_info *hw,
writel(value, ioaddr + GMAC_CONFIG);
- /* Mask GMAC interrupts */
- value = GMAC_INT_DEFAULT_MASK;
- if (hw->pmt)
- value |= GMAC_INT_PMT_EN;
+ /* Enable GMAC interrupts */
+ value = GMAC_INT_DEFAULT_ENABLE;
+
if (hw->pcs)
value |= GMAC_PCS_IRQ_DEFAULT;
@@ -572,10 +571,12 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
void __iomem *ioaddr = hw->pcsr;
- u32 intr_status;
+ u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+ u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
int ret = 0;
- intr_status = readl(ioaddr + GMAC_INT_STATUS);
+ /* Discard disabled bits */
+ intr_status &= intr_enable;
/* Not used events (e.g. MMC interrupts) are not handled. */
if ((intr_status & mmc_tx_irq))
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
index b2caf5132bd2..7b982e02ea3a 100644
--- a/drivers/net/ethernet/sun/Kconfig
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Sun network device configuration
#
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 113bd57e2ea0..9020b084b953 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
*
* Copyright (C) 2004 Sun Microsystems Inc.
diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h
index 882ce168a799..13f3860496a8 100644
--- a/drivers/net/ethernet/sun/cassini.h
+++ b/drivers/net/ethernet/sun/cassini.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $
* cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver.
*
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 5ea037672e6f..a5dd627fe2f9 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* ldmvsw.c: Sun4v LDOM Virtual Switch Driver.
*
* Copyright (C) 2016-2017 Oracle. All rights reserved.
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 06001bacbe0f..8dd545fed30d 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* niu.c: Neptune ethernet driver.
*
* Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 0b1f41f6bceb..f047b2797156 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters.
*
* Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net)
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index a7afcee3c5ae..7a16d40a72d1 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $
* sungem.c: Sun GEM ethernet driver.
*
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 0431f1e5f511..06da2f59fcbf 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
* auto carrier detecting ethernet driver. Also known as the
* "Happy Meal Ethernet" found on SunSwift SBUS cards.
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index a6bcdcdd947e..7fe0d5e33922 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
* Once again I am out to prove that every ethernet
* controller out there can be most efficiently programmed
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 27fb22638885..63d3d6b215f3 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sunvnet.c: Sun LDOM Virtual Network Driver.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 8aa3ce46bb81..d8f4c3f28150 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/* sunvnet.c: Sun LDOM Virtual Network Driver.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 3c85a0885f9b..1b1b78fdc138 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1636,6 +1636,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
q_idx = q_idx % cpsw->tx_ch_num;
txch = cpsw->txv[q_idx].ch;
+ txq = netdev_get_tx_queue(ndev, q_idx);
ret = cpsw_tx_packet_submit(priv, skb, txch);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
@@ -1646,15 +1647,26 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
* tell the kernel to stop sending us tx frames.
*/
if (unlikely(!cpdma_check_free_tx_desc(txch))) {
- txq = netdev_get_tx_queue(ndev, q_idx);
netif_tx_stop_queue(txq);
+
+ /* Barrier, so that the stop_queue update is visible to other CPUs */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
}
return NETDEV_TX_OK;
fail:
ndev->stats.tx_dropped++;
- txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
netif_tx_stop_queue(txq);
+
+ /* Barrier, so that the stop_queue update is visible to other CPUs */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
+
return NETDEV_TX_BUSY;
}
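
The cpsw hunk adopts the standard stop/recheck sequence: stop the queue,
execute a full barrier, then re-test for free descriptors so a completion
racing with the stop can still wake the queue. A userspace sketch of the
ordering, with a C11 seq_cst fence standing in for smp_mb__after_atomic():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool queue_stopped;
static atomic_int free_descs;

static void maybe_stop_queue(void)
{
	if (atomic_load(&free_descs) == 0) {
		atomic_store(&queue_stopped, true);
		/* order the stop before the recheck, like smp_mb__after_atomic() */
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load(&free_descs) > 0)
			atomic_store(&queue_stopped, false);	/* wake */
	}
}

int main(void)
{
	atomic_store(&free_descs, 0);
	maybe_stop_queue();
	printf("stopped=%d\n", (int)atomic_load(&queue_stopped));
	return 0;
}
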
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index f3313a129531..e3e29c2b028b 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -822,7 +822,7 @@ void phy_start(struct phy_device *phydev)
phy_resume(phydev);
/* make sure interrupts are re-enabled for the PHY */
- if (phydev->irq != PHY_POLL) {
+ if (phy_interrupt_is_valid(phydev)) {
err = phy_enable_interrupts(phydev);
if (err < 0)
break;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0dc66e4fbb2c..17e496b88f81 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -181,6 +181,7 @@ struct tun_file {
struct tun_struct *detached;
struct ptr_ring tx_ring;
struct xdp_rxq_info xdp_rxq;
+ int xdp_pending_pkts;
};
struct tun_flow_entry {
@@ -1665,6 +1666,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
case XDP_REDIRECT:
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
+ ++tfile->xdp_pending_pkts;
err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
if (err)
goto err_redirect;
@@ -1986,6 +1988,11 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
result = tun_get_user(tun, tfile, NULL, from,
file->f_flags & O_NONBLOCK, false);
+ if (tfile->xdp_pending_pkts) {
+ tfile->xdp_pending_pkts = 0;
+ xdp_do_flush_map();
+ }
+
tun_put(tun);
return result;
}
@@ -2322,6 +2329,13 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
m->msg_flags & MSG_DONTWAIT,
m->msg_flags & MSG_MORE);
+
+ if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT ||
+ !(m->msg_flags & MSG_MORE)) {
+ tfile->xdp_pending_pkts = 0;
+ xdp_do_flush_map();
+ }
+
tun_put(tun);
return ret;
}
@@ -3153,6 +3167,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
+ tfile->xdp_pending_pkts = 0;
return 0;
}
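
The tun changes batch XDP redirects and flush either once NAPI_POLL_WEIGHT
packets have accumulated or when the sender signals no more data. The
accounting reduces to a pending counter with two flush triggers; a toy
version of that logic:

#include <stdio.h>

#define BATCH_LIMIT 64	/* stands in for NAPI_POLL_WEIGHT */

static int pending;

static void flush(void)
{
	if (pending) {
		printf("flushing %d queued packets\n", pending);
		pending = 0;
	}
}

static void queue_pkt(int more_coming)
{
	pending++;
	if (pending >= BATCH_LIMIT || !more_coming)
		flush();	/* full batch, or MSG_MORE not set */
}

int main(void)
{
	for (int i = 0; i < 70; i++)
		queue_pkt(i != 69);
	return 0;
}
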
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index b0fdc1023619..f3ec13b80b20 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -91,6 +91,35 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
+ .id = QCA988X_HW_2_0_VERSION,
+ .dev_id = QCA988X_2_0_DEVICE_ID_UBNT,
+ .name = "qca988x hw2.0 ubiquiti",
+ .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 2116,
+ .fw = {
+ .dir = QCA988X_HW_2_0_FW_DIR,
+ .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
+ .board_size = QCA988X_BOARD_DATA_SZ,
+ .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ },
+ {
.id = QCA9887_HW_1_0_VERSION,
.dev_id = QCA9887_1_0_DEVICE_ID,
.name = "qca9887 hw1.0",
@@ -1276,10 +1305,7 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
len -= sizeof(*hdr);
data = hdr->data;
- /* jump over the padding */
- ie_len = ALIGN(ie_len, 4);
-
- if (len < ie_len) {
+ if (len < ALIGN(ie_len, 4)) {
ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
ie_id, ie_len, len);
ret = -EINVAL;
@@ -1318,6 +1344,9 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
goto out;
}
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
len -= ie_len;
data += ie_len;
}
@@ -1448,9 +1477,6 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
len -= sizeof(*hdr);
data += sizeof(*hdr);
- /* jump over the padding */
- ie_len = ALIGN(ie_len, 4);
-
if (len < ie_len) {
ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len);
@@ -1556,6 +1582,9 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
break;
}
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
len -= ie_len;
data += ie_len;
}
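
Both ath10k TLV walkers are reordered so the remaining length is validated
against the padded IE size before the payload is consumed, and the alignment
is applied only when stepping to the next element. A sketch of the corrected
order; align4() is a local stand-in for the kernel's ALIGN():

#include <stddef.h>
#include <stdio.h>

static size_t align4(size_t x)
{
	return (x + 3) & ~(size_t)3;
}

int main(void)
{
	size_t len = 24;	/* bytes remaining in the blob */
	size_t ie_len = 5;	/* payload length from the TLV header */

	if (len < align4(ie_len)) {	/* validate against padded size */
		fprintf(stderr, "truncated IE\n");
		return 1;
	}
	/* ... consume ie_len bytes of payload here ... */
	len -= align4(ie_len);		/* jump over the padding */
	printf("remaining: %zu\n", len);
	return 0;
}
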
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index 4dde126dab17..7173b3743b43 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -616,7 +617,7 @@ static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
{
.type = ATH10K_MEM_REGION_TYPE_DRAM,
.start = 0x400000,
- .len = 0x90000,
+ .len = 0xa8000,
.name = "DRAM",
.section_table = {
.sections = NULL,
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 6d836a26272f..554cd7856cb6 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -81,6 +82,8 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
void ath10k_debug_print_board_info(struct ath10k *ar)
{
char boardinfo[100];
+ const struct firmware *board;
+ u32 crc;
if (ar->id.bmi_ids_valid)
scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
@@ -88,11 +91,16 @@ void ath10k_debug_print_board_info(struct ath10k *ar)
else
scnprintf(boardinfo, sizeof(boardinfo), "N/A");
+ board = ar->normal_mode_fw.board;
+ if (!IS_ERR_OR_NULL(board))
+ crc = crc32_le(0, board->data, board->size);
+ else
+ crc = 0;
+
ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
ar->bd_api,
boardinfo,
- crc32_le(0, ar->normal_mode_fw.board->data,
- ar->normal_mode_fw.board->size));
+ crc);
}
void ath10k_debug_print_boot_info(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 6203bc65799b..413b1b4321f7 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -22,6 +22,7 @@
#define ATH10K_FW_DIR "ath10k"
+#define QCA988X_2_0_DEVICE_ID_UBNT (0x11ac)
#define QCA988X_2_0_DEVICE_ID (0x003c)
#define QCA6164_2_1_DEVICE_ID (0x0041)
#define QCA6174_2_1_DEVICE_ID (0x003e)
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 355db6a0fcf3..1b266cd0c2ec 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -58,6 +58,9 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
static const struct pci_device_id ath10k_pci_id_table[] = {
+ /* PCI-E QCA988X V2 (Ubiquiti branded) */
+ { PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },
+
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
@@ -74,6 +77,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
* hacks. ath10k doesn't have them and these devices crash horribly
* because of that.
*/
+ { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
{ QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
@@ -2193,6 +2197,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
switch (ar_pci->pdev->device) {
+ case QCA988X_2_0_DEVICE_ID_UBNT:
case QCA988X_2_0_DEVICE_ID:
case QCA99X0_2_0_DEVICE_ID:
case QCA9888_2_0_DEVICE_ID:
@@ -3424,6 +3429,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
switch (pci_dev->device) {
+ case QCA988X_2_0_DEVICE_ID_UBNT:
case QCA988X_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA988X;
pci_ps = false;
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 3d9447e21025..695c779ae8cf 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -72,7 +72,7 @@ static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
s16 nf)
{
- s8 noise = ath9k_hw_get_default_nf(ah, chan, 0);
+ s8 noise = ATH_DEFAULT_NOISE_FLOOR;
if (nf) {
s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH -
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 56676eaff24c..cb0eef13af1c 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -24,6 +24,7 @@ static const struct usb_device_id ath9k_hif_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
{ USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */
{ USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */
+ { USB_DEVICE(0x07b8, 0x9271) }, /* Altai WA1011N-GU */
{ USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
{ USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
{ USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 8027bb7c03c2..fcb208d1f276 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -98,6 +98,7 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
reorder_work.work);
struct mt76_dev *dev = tid->dev;
struct sk_buff_head frames;
+ int nframes;
__skb_queue_head_init(&frames);
@@ -105,14 +106,44 @@ mt76_rx_aggr_reorder_work(struct work_struct *work)
spin_lock(&tid->lock);
mt76_rx_aggr_check_release(tid, &frames);
+ nframes = tid->nframes;
spin_unlock(&tid->lock);
- ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work, REORDER_TIMEOUT);
+ if (nframes)
+ ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
+ REORDER_TIMEOUT);
mt76_rx_complete(dev, &frames, -1);
local_bh_enable();
}
+static void
+mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct ieee80211_bar *bar = (struct ieee80211_bar *) skb->data;
+ struct mt76_wcid *wcid = status->wcid;
+ struct mt76_rx_tid *tid;
+ u16 seqno;
+
+ if (!ieee80211_is_ctl(bar->frame_control))
+ return;
+
+ if (!ieee80211_is_back_req(bar->frame_control))
+ return;
+
+ status->tid = le16_to_cpu(bar->control) >> 12;
+ seqno = le16_to_cpu(bar->start_seq_num) >> 4;
+ tid = rcu_dereference(wcid->aggr[status->tid]);
+ if (!tid)
+ return;
+
+ spin_lock_bh(&tid->lock);
+ mt76_rx_aggr_release_frames(tid, frames, seqno);
+ mt76_rx_aggr_release_head(tid, frames);
+ spin_unlock_bh(&tid->lock);
+}
+
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
@@ -126,9 +157,14 @@ void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
__skb_queue_tail(frames, skb);
sta = wcid_to_sta(wcid);
- if (!sta || !status->aggr)
+ if (!sta)
return;
+ if (!status->aggr) {
+ mt76_rx_aggr_check_ctl(skb, frames);
+ return;
+ }
+
tid = rcu_dereference(wcid->aggr[status->tid]);
if (!tid)
return;
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 5fcb2deb89a2..85f8d324ebf8 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -276,6 +276,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, TX_FRAG_LIST);
ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, AP_LINK_PS);
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -470,6 +471,53 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
return 0;
}
+static void
+mt76_check_ps(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid = status->wcid;
+ bool ps;
+
+ if (!wcid || !wcid->sta)
+ return;
+
+ sta = container_of((void *) wcid, struct ieee80211_sta, drv_priv);
+
+ if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
+ return;
+
+ if (ieee80211_is_pspoll(hdr->frame_control)) {
+ ieee80211_sta_pspoll(sta);
+ return;
+ }
+
+ if (ieee80211_has_morefrags(hdr->frame_control) ||
+ !(ieee80211_is_mgmt(hdr->frame_control) ||
+ ieee80211_is_data(hdr->frame_control)))
+ return;
+
+ ps = ieee80211_has_pm(hdr->frame_control);
+
+ if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)))
+ ieee80211_sta_uapsd_trigger(sta, status->tid);
+
+ if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
+ return;
+
+ if (ps) {
+ set_bit(MT_WCID_FLAG_PS, &wcid->flags);
+ mt76_stop_tx_queues(dev, sta, true);
+ } else {
+ clear_bit(MT_WCID_FLAG_PS, &wcid->flags);
+ }
+
+ ieee80211_sta_ps_transition(sta, ps);
+ dev->drv->sta_ps(dev, sta, ps);
+}
+
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
int queue)
{
@@ -498,8 +546,10 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q)
__skb_queue_head_init(&frames);
- while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL)
+ while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
+ mt76_check_ps(dev, skb);
mt76_rx_aggr_reorder(skb, &frames);
+ }
mt76_rx_complete(dev, &frames, q);
}
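
Note on the hunk above: mt76_check_ps() derives a station's power-save state from the PM bit of each received data or management frame, but acts only on state changes, stopping TX queues when the station starts dozing and reporting every transition to mac80211. The edge-detect core of that logic, as a minimal standalone model (illustrative, not driver code):

#include <stdio.h>
#include <stdbool.h>

struct sta_state {
	bool ps;	/* last recorded power-save state */
};

/* Act only when the PM bit differs from the recorded state, the same
 * edge-triggered logic mt76_check_ps() builds from test_bit()/set_bit()
 * on wcid->flags. */
static void check_ps(struct sta_state *sta, bool pm_bit)
{
	if (sta->ps == pm_bit)
		return;		/* no transition, nothing to do */

	sta->ps = pm_bit;
	printf("station %s\n",
	       pm_bit ? "entered doze: stop tx queues" : "woke up");
}

int main(void)
{
	struct sta_state sta = { .ps = false };
	bool pm_bits[] = { false, true, true, false };

	for (unsigned int i = 0; i < sizeof(pm_bits) / sizeof(pm_bits[0]); i++)
		check_ps(&sta, pm_bits[i]);
	return 0;
}
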
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 129015c9d116..d2ce15093edd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -121,11 +121,18 @@ struct mt76_queue_ops {
void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};
+enum mt76_wcid_flags {
+ MT_WCID_FLAG_CHECK_PS,
+ MT_WCID_FLAG_PS,
+};
+
struct mt76_wcid {
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
struct work_struct aggr_work;
+ unsigned long flags;
+
u8 idx;
u8 hw_key_idx;
@@ -206,6 +213,9 @@ struct mt76_driver_ops {
struct sk_buff *skb);
void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
+
+ void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
+ bool ps);
};
struct mt76_channel_state {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2.h
index 17df17afd9bf..e62131b88102 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2.h
@@ -218,6 +218,8 @@ void mt76x2_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
void mt76x2_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
+void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
+
void mt76x2_update_channel(struct mt76_dev *mdev);
s8 mt76x2_tx_get_max_txpwr_adj(struct mt76x2_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
index 1b00ae4465a2..9dbf94947324 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_init.c
@@ -630,6 +630,7 @@ struct mt76x2_dev *mt76x2_alloc_device(struct device *pdev)
.tx_complete_skb = mt76x2_tx_complete_skb,
.rx_skb = mt76x2_queue_rx_skb,
.rx_poll_complete = mt76x2_rx_poll_complete,
+ .sta_ps = mt76x2_sta_ps,
};
struct ieee80211_hw *hw;
struct mt76x2_dev *dev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
index 6c30b5eaa9ca..7ea3d841918e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac.c
@@ -341,7 +341,7 @@ int mt76x2_mac_process_rx(struct mt76x2_dev *dev, struct sk_buff *skb,
mt76x2_remove_hdr_pad(skb, pad_len);
- if (rxinfo & MT_RXINFO_BA)
+ if ((rxinfo & MT_RXINFO_BA) && !(rxinfo & MT_RXINFO_NULL))
status->aggr = true;
if (WARN_ON_ONCE(len > skb->len))
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
index bf26284b9989..205043b470b2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_main.c
@@ -282,6 +282,9 @@ mt76x2_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
mt76x2_txq_init(dev, sta->txq[i]);
+ if (vif->type == NL80211_IFTYPE_AP)
+ set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
+
rcu_assign_pointer(dev->wcid[idx], &msta->wcid);
out:
@@ -311,23 +314,14 @@ mt76x2_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return 0;
}
-static void
-mt76x2_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+void
+mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
struct mt76x2_sta *msta = (struct mt76x2_sta *) sta->drv_priv;
- struct mt76x2_dev *dev = hw->priv;
+ struct mt76x2_dev *dev = container_of(mdev, struct mt76x2_dev, mt76);
int idx = msta->wcid.idx;
- switch (cmd) {
- case STA_NOTIFY_SLEEP:
- mt76x2_mac_wcid_set_drop(dev, idx, true);
- mt76_stop_tx_queues(&dev->mt76, sta, true);
- break;
- case STA_NOTIFY_AWAKE:
- mt76x2_mac_wcid_set_drop(dev, idx, false);
- break;
- }
+ mt76x2_mac_wcid_set_drop(dev, idx, ps);
}
static int
@@ -549,6 +543,12 @@ static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
mutex_unlock(&dev->mutex);
}
+static int
+mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
+{
+ return 0;
+}
+
const struct ieee80211_ops mt76x2_ops = {
.tx = mt76x2_tx,
.start = mt76x2_start,
@@ -560,7 +560,6 @@ const struct ieee80211_ops mt76x2_ops = {
.bss_info_changed = mt76x2_bss_info_changed,
.sta_add = mt76x2_sta_add,
.sta_remove = mt76x2_sta_remove,
- .sta_notify = mt76x2_sta_notify,
.set_key = mt76x2_set_key,
.conf_tx = mt76x2_conf_tx,
.sw_scan_start = mt76x2_sw_scan,
@@ -573,5 +572,6 @@ const struct ieee80211_ops mt76x2_ops = {
.release_buffered_frames = mt76_release_buffered_frames,
.set_coverage_class = mt76x2_set_coverage_class,
.get_survey = mt76_get_survey,
+ .set_tim = mt76x2_set_tim,
};
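
Note on the hunks above: mt76x2 drops the mac80211 sta_notify() callback in favour of the new sta_ps hook on mt76_driver_ops, so the mt76 core, which now detects PS transitions itself, calls straight into the driver; the empty set_tim callback is presumably there so mac80211 still performs its TIM bookkeeping for PS-buffered frames. The ops-table dispatch reduces to this pattern (hypothetical names, standalone model):

#include <stdio.h>
#include <stdbool.h>

struct model_dev;

/* stand-in for mt76_driver_ops: the core calls a driver hook directly */
struct model_driver_ops {
	void (*sta_ps)(struct model_dev *dev, int wcid_idx, bool ps);
};

struct model_dev {
	const struct model_driver_ops *drv;
};

static void model_sta_ps(struct model_dev *dev, int wcid_idx, bool ps)
{
	/* mt76x2_sta_ps() folds both directions into one call:
	 * drop frames while the station dozes, stop dropping on wake */
	printf("wcid %d: set drop=%d\n", wcid_idx, (int)ps);
}

int main(void)
{
	static const struct model_driver_ops ops = { .sta_ps = model_sta_ps };
	struct model_dev dev = { .drv = &ops };

	dev.drv->sta_ps(&dev, 7, true);		/* station went to sleep */
	dev.drv->sta_ps(&dev, 7, false);	/* station woke up */
	return 0;
}
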
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index f20e77b4bb65..317c1b3101da 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1123,7 +1123,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
}
if (0 == tmp) {
read_addr = REG_DBI_RDATA + addr % 4;
- ret = rtl_read_word(rtlpriv, read_addr);
+ ret = rtl_read_byte(rtlpriv, read_addr);
}
return ret;
}
@@ -1165,7 +1165,8 @@ static void _rtl8821ae_enable_aspm_back_door(struct ieee80211_hw *hw)
}
tmp = _rtl8821ae_dbi_read(rtlpriv, 0x70f);
- _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7));
+ _rtl8821ae_dbi_write(rtlpriv, 0x70f, tmp | BIT(7) |
+ ASPM_L1_LATENCY << 3);
tmp = _rtl8821ae_dbi_read(rtlpriv, 0x719);
_rtl8821ae_dbi_write(rtlpriv, 0x719, tmp | BIT(3) | BIT(4));
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index a7aacbc3984e..46dcb7fef195 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -99,6 +99,7 @@
#define RTL_USB_MAX_RX_COUNT 100
#define QBSS_LOAD_SIZE 5
#define MAX_WMMELE_LENGTH 64
+#define ASPM_L1_LATENCY 7
#define TOTAL_CAM_ENTRY 32
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 9bd7ddeeb6a5..8328d395e332 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -351,6 +351,9 @@ static int xennet_open(struct net_device *dev)
unsigned int i = 0;
struct netfront_queue *queue = NULL;
+ if (!np->queues)
+ return -ENODEV;
+
for (i = 0; i < num_queues; ++i) {
queue = &np->queues[i];
napi_enable(&queue->napi);
@@ -1358,18 +1361,8 @@ static int netfront_probe(struct xenbus_device *dev,
#ifdef CONFIG_SYSFS
info->netdev->sysfs_groups[0] = &xennet_dev_group;
#endif
- err = register_netdev(info->netdev);
- if (err) {
- pr_warn("%s: register_netdev err=%d\n", __func__, err);
- goto fail;
- }
return 0;
-
- fail:
- xennet_free_netdev(netdev);
- dev_set_drvdata(&dev->dev, NULL);
- return err;
}
static void xennet_end_access(int ref, void *page)
@@ -1737,8 +1730,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
{
unsigned int i;
- rtnl_lock();
-
for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
struct netfront_queue *queue = &info->queues[i];
@@ -1747,8 +1738,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
netif_napi_del(&queue->napi);
}
- rtnl_unlock();
-
kfree(info->queues);
info->queues = NULL;
}
@@ -1764,8 +1753,6 @@ static int xennet_create_queues(struct netfront_info *info,
if (!info->queues)
return -ENOMEM;
- rtnl_lock();
-
for (i = 0; i < *num_queues; i++) {
struct netfront_queue *queue = &info->queues[i];
@@ -1774,7 +1761,7 @@ static int xennet_create_queues(struct netfront_info *info,
ret = xennet_init_queue(queue);
if (ret < 0) {
- dev_warn(&info->netdev->dev,
+ dev_warn(&info->xbdev->dev,
"only created %d queues\n", i);
*num_queues = i;
break;
@@ -1788,10 +1775,8 @@ static int xennet_create_queues(struct netfront_info *info,
netif_set_real_num_tx_queues(info->netdev, *num_queues);
- rtnl_unlock();
-
if (*num_queues == 0) {
- dev_err(&info->netdev->dev, "no queues\n");
+ dev_err(&info->xbdev->dev, "no queues\n");
return -EINVAL;
}
return 0;
@@ -1828,6 +1813,7 @@ static int talk_to_netback(struct xenbus_device *dev,
goto out;
}
+ rtnl_lock();
if (info->queues)
xennet_destroy_queues(info);
@@ -1838,6 +1824,7 @@ static int talk_to_netback(struct xenbus_device *dev,
info->queues = NULL;
goto out;
}
+ rtnl_unlock();
/* Create shared ring, alloc event channel -- for each queue */
for (i = 0; i < num_queues; ++i) {
@@ -1934,8 +1921,10 @@ abort_transaction_no_dev_fatal:
xenbus_transaction_end(xbt, 1);
destroy_ring:
xennet_disconnect_backend(info);
+ rtnl_lock();
xennet_destroy_queues(info);
out:
+ rtnl_unlock();
device_unregister(&dev->dev);
return err;
}
@@ -1965,6 +1954,15 @@ static int xennet_connect(struct net_device *dev)
netdev_update_features(dev);
rtnl_unlock();
+ if (dev->reg_state == NETREG_UNINITIALIZED) {
+ err = register_netdev(dev);
+ if (err) {
+ pr_warn("%s: register_netdev err=%d\n", __func__, err);
+ device_unregister(&np->xbdev->dev);
+ return err;
+ }
+ }
+
/*
* All public and private state should now be sane. Get
* ready to start sending and receiving packets and give the driver
@@ -2150,10 +2148,14 @@ static int xennet_remove(struct xenbus_device *dev)
xennet_disconnect_backend(info);
- unregister_netdev(info->netdev);
+ if (info->netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(info->netdev);
- if (info->queues)
+ if (info->queues) {
+ rtnl_lock();
xennet_destroy_queues(info);
+ rtnl_unlock();
+ }
xennet_free_netdev(info->netdev);
return 0;
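
Note on the hunks above: the netfront rework moves register_netdev() out of netfront_probe() and into xennet_connect(), so the interface becomes visible only once the backend handshake has succeeded; xennet_remove() accordingly has to tolerate a device that never got registered. The reg_state guards reduce to this shape (standalone model, illustrative names):

#include <stdio.h>

enum reg_state { NETREG_UNINITIALIZED, NETREG_REGISTERED };

struct model_netdev { enum reg_state reg_state; };

/* connect path: register on the first successful backend handshake */
static void model_connect(struct model_netdev *dev)
{
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		dev->reg_state = NETREG_REGISTERED; /* register_netdev() stand-in */
		printf("registered on first connect\n");
	}
}

/* remove path: a device whose backend never connected was never registered */
static void model_remove(struct model_netdev *dev)
{
	if (dev->reg_state == NETREG_REGISTERED)
		printf("unregistering\n");
	else
		printf("never registered, nothing to unregister\n");
}

int main(void)
{
	struct model_netdev a = { NETREG_UNINITIALIZED };
	struct model_netdev b = { NETREG_UNINITIALIZED };

	model_connect(&a);
	model_remove(&a);
	model_remove(&b);	/* probe ran, backend never came up */
	return 0;
}
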
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 3903d90fe51c..41713f16ff97 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -385,6 +385,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
u32 lvl, void *context, void **rv)
{
acpi_handle *phandle = (acpi_handle *)context;
+ unsigned long long current_status = 0;
acpi_status status;
struct acpi_device_info *info;
int retval = 0;
@@ -396,7 +397,9 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
return retval;
}
- if (info->current_status && (info->valid & ACPI_VALID_HID) &&
+ acpi_bus_get_status_handle(handle, &current_status);
+
+ if (current_status && (info->valid & ACPI_VALID_HID) &&
(!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
!strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
pr_debug("found hardware: %s, handle: %p\n",
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 5ef7b46a2578..49377d502b74 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -367,7 +367,7 @@ static int do_validate_mem(struct pcmcia_socket *s,
}
}
- dev_dbg(&s->dev, "cs: memory probe 0x%06lx-0x%06lx: %p %p %u %u %u",
+ dev_dbg(&s->dev, "cs: memory probe 0x%06lx-0x%06lx: %pr %pr %u %u %u",
base, base+size-1, res1, res2, ret, info1, info2);
free_region(res2);
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 764650eb8897..c5f2344c189b 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -191,12 +191,16 @@ static int soc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
int ret = 0, i;
- clk_prepare_enable(skt->clk);
+ ret = clk_prepare_enable(skt->clk);
+ if (ret)
+ return ret;
if (skt->ops->hw_init) {
ret = skt->ops->hw_init(skt);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(skt->clk);
return ret;
+ }
}
for (i = 0; i < ARRAY_SIZE(skt->stat); i++) {
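
Note on the hunk above: soc_pcmcia_hw_init() stops ignoring the return value of clk_prepare_enable() and disables the clock again if the socket's hw_init() hook fails afterwards. The check-and-unwind shape, as a standalone sketch with stubbed calls and an illustrative error value:

#include <stdio.h>

/* stand-ins for clk_prepare_enable()/clk_disable_unprepare() and the
 * socket's hw_init() hook; the -5 is an illustrative errno value */
static int clk_on(void)   { printf("clk enabled\n"); return 0; }
static void clk_off(void) { printf("clk disabled\n"); }
static int hw_init(void)  { return -5; }

static int hw_init_path(void)
{
	int ret = clk_on();

	if (ret)
		return ret;	/* propagate instead of ignoring */

	ret = hw_init();
	if (ret) {
		clk_off();	/* unwind what this function enabled */
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("hw_init_path() = %d\n", hw_init_path());
	return 0;
}
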
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index d8599736a41a..6dec6ab13300 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -423,7 +423,7 @@ static int chromeos_laptop_probe(struct platform_device *pdev)
return ret;
}
-static struct chromeos_laptop samsung_series_5_550 = {
+static const struct chromeos_laptop samsung_series_5_550 = {
.i2c_peripherals = {
/* Touchpad. */
{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
@@ -432,14 +432,14 @@ static struct chromeos_laptop samsung_series_5_550 = {
},
};
-static struct chromeos_laptop samsung_series_5 = {
+static const struct chromeos_laptop samsung_series_5 = {
.i2c_peripherals = {
/* Light Sensor. */
{ .add = setup_tsl2583_als, I2C_ADAPTER_SMBUS },
},
};
-static struct chromeos_laptop chromebook_pixel = {
+static const struct chromeos_laptop chromebook_pixel = {
.i2c_peripherals = {
/* Touch Screen. */
{ .add = setup_atmel_1664s_ts, I2C_ADAPTER_PANEL },
@@ -450,14 +450,14 @@ static struct chromeos_laptop chromebook_pixel = {
},
};
-static struct chromeos_laptop hp_chromebook_14 = {
+static const struct chromeos_laptop hp_chromebook_14 = {
.i2c_peripherals = {
/* Touchpad. */
{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
},
};
-static struct chromeos_laptop dell_chromebook_11 = {
+static const struct chromeos_laptop dell_chromebook_11 = {
.i2c_peripherals = {
/* Touchpad. */
{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
@@ -466,28 +466,28 @@ static struct chromeos_laptop dell_chromebook_11 = {
},
};
-static struct chromeos_laptop toshiba_cb35 = {
+static const struct chromeos_laptop toshiba_cb35 = {
.i2c_peripherals = {
/* Touchpad. */
{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
},
};
-static struct chromeos_laptop acer_c7_chromebook = {
+static const struct chromeos_laptop acer_c7_chromebook = {
.i2c_peripherals = {
/* Touchpad. */
{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
},
};
-static struct chromeos_laptop acer_ac700 = {
+static const struct chromeos_laptop acer_ac700 = {
.i2c_peripherals = {
/* Light Sensor. */
{ .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
},
};
-static struct chromeos_laptop acer_c720 = {
+static const struct chromeos_laptop acer_c720 = {
.i2c_peripherals = {
/* Touchscreen. */
{ .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 },
@@ -500,14 +500,14 @@ static struct chromeos_laptop acer_c720 = {
},
};
-static struct chromeos_laptop hp_pavilion_14_chromebook = {
+static const struct chromeos_laptop hp_pavilion_14_chromebook = {
.i2c_peripherals = {
/* Touchpad. */
{ .add = setup_cyapa_tp, I2C_ADAPTER_SMBUS },
},
};
-static struct chromeos_laptop cr48 = {
+static const struct chromeos_laptop cr48 = {
.i2c_peripherals = {
/* Light Sensor. */
{ .add = setup_tsl2563_als, I2C_ADAPTER_SMBUS },
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 1baf720faf69..af89e82eecd2 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -35,6 +35,9 @@
#define DRV_NAME "cros_ec_lpcs"
#define ACPI_DRV_NAME "GOOG0004"
+/* True if ACPI device is present */
+static bool cros_ec_lpc_acpi_device_found;
+
static int ec_response_timed_out(void)
{
unsigned long one_second = jiffies + HZ;
@@ -54,7 +57,6 @@ static int ec_response_timed_out(void)
static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
struct cros_ec_command *msg)
{
- struct ec_host_request *request;
struct ec_host_response response;
u8 sum;
int ret = 0;
@@ -65,8 +67,6 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec,
/* Write buffer */
cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout);
- request = (struct ec_host_request *)ec->dout;
-
/* Here we go */
sum = EC_COMMAND_PROTOCOL_3;
cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_CMD, 1, &sum);
@@ -362,6 +362,13 @@ static const struct dmi_system_id cros_ec_lpc_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "Peppy"),
},
},
+ {
+ /* x86-glimmer, the Lenovo Thinkpad Yoga 11e. */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Glimmer"),
+ },
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table);
@@ -396,9 +403,21 @@ static struct platform_driver cros_ec_lpc_driver = {
.remove = cros_ec_lpc_remove,
};
+static struct platform_device cros_ec_lpc_device = {
+ .name = DRV_NAME
+};
+
+static acpi_status cros_ec_lpc_parse_device(acpi_handle handle, u32 level,
+ void *context, void **retval)
+{
+ *(bool *)context = true;
+ return AE_CTRL_TERMINATE;
+}
+
static int __init cros_ec_lpc_init(void)
{
int ret;
+ acpi_status status;
if (!dmi_check_system(cros_ec_lpc_dmi_table)) {
pr_err(DRV_NAME ": unsupported system.\n");
@@ -415,11 +434,28 @@ static int __init cros_ec_lpc_init(void)
return ret;
}
- return 0;
+ status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device,
+ &cros_ec_lpc_acpi_device_found, NULL);
+ if (ACPI_FAILURE(status))
+ pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME);
+
+ if (!cros_ec_lpc_acpi_device_found) {
+ /* Register the device, and it'll get hooked up automatically */
+ ret = platform_device_register(&cros_ec_lpc_device);
+ if (ret) {
+ pr_err(DRV_NAME ": can't register device: %d\n", ret);
+ platform_driver_unregister(&cros_ec_lpc_driver);
+ cros_ec_lpc_reg_destroy();
+ }
+ }
+
+ return ret;
}
static void __exit cros_ec_lpc_exit(void)
{
+ if (!cros_ec_lpc_acpi_device_found)
+ platform_device_unregister(&cros_ec_lpc_device);
platform_driver_unregister(&cros_ec_lpc_driver);
cros_ec_lpc_reg_destroy();
}
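
Note on the hunks above: cros_ec_lpc_init() now scans the ACPI namespace for the GOOG0004 node and instantiates its own platform device only when firmware did not declare one, and cros_ec_lpc_exit() mirrors that choice. The presence probe plus fallback boils down to (standalone model):

#include <stdio.h>
#include <stdbool.h>

static bool acpi_device_found;

/* stand-in for the acpi_get_devices() walk over "GOOG0004" nodes */
static void find_acpi_device(bool firmware_has_node)
{
	if (firmware_has_node)
		acpi_device_found = true;	/* callback sets the flag */
}

int main(void)
{
	find_acpi_device(false);	/* pretend firmware lacks the node */

	if (!acpi_device_found)
		printf("init: register fallback platform device\n");
	else
		printf("init: ACPI will enumerate the device\n");

	/* exit must mirror init and only tear down what it created */
	if (!acpi_device_found)
		printf("exit: unregister fallback platform device\n");
	return 0;
}
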
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 8dfa7fcb1248..e7bbdf947bbc 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -60,12 +60,14 @@ static int send_command(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
{
int ret;
+ int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg);
if (ec_dev->proto_version > 2)
- ret = ec_dev->pkt_xfer(ec_dev, msg);
+ xfer_fxn = ec_dev->pkt_xfer;
else
- ret = ec_dev->cmd_xfer(ec_dev, msg);
+ xfer_fxn = ec_dev->cmd_xfer;
+ ret = (*xfer_fxn)(ec_dev, msg);
if (msg->result == EC_RES_IN_PROGRESS) {
int i;
struct cros_ec_command *status_msg;
@@ -88,7 +90,7 @@ static int send_command(struct cros_ec_device *ec_dev,
for (i = 0; i < EC_COMMAND_RETRIES; i++) {
usleep_range(10000, 11000);
- ret = ec_dev->cmd_xfer(ec_dev, status_msg);
+ ret = (*xfer_fxn)(ec_dev, status_msg);
if (ret < 0)
break;
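
Note on the hunks above: send_command() previously issued the command through pkt_xfer on v3+ protocols but always polled EC_RES_IN_PROGRESS status through cmd_xfer; selecting the transfer function once keeps the command and its polls on the same path. The pattern, modeled standalone with made-up names:

#include <stdio.h>

struct ec_dev;
typedef int (*xfer_fn)(struct ec_dev *ec, const char *what);

struct ec_dev {
	int proto_version;
	xfer_fn pkt_xfer;	/* v3+ packet protocol */
	xfer_fn cmd_xfer;	/* legacy protocol */
};

static int pkt_xfer(struct ec_dev *ec, const char *what)
{
	printf("pkt_xfer: %s\n", what);
	return 0;
}

static int cmd_xfer(struct ec_dev *ec, const char *what)
{
	printf("cmd_xfer: %s\n", what);
	return 0;
}

static int send_command(struct ec_dev *ec)
{
	/* pick the transfer function once ... */
	xfer_fn xfer = ec->proto_version > 2 ? ec->pkt_xfer : ec->cmd_xfer;

	xfer(ec, "command");
	/* ... and reuse it for the in-progress status poll, which the
	 * old code wired to cmd_xfer regardless of protocol */
	return xfer(ec, "status poll");
}

int main(void)
{
	struct ec_dev ec = {
		.proto_version = 3,
		.pkt_xfer = pkt_xfer,
		.cmd_xfer = cmd_xfer,
	};
	return send_command(&ec);
}
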
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index d6eebe872187..da0a719d32f7 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -185,7 +185,7 @@ static ssize_t show_ec_version(struct device *dev,
count += scnprintf(buf + count, PAGE_SIZE - count,
"Build info: EC error %d\n", msg->result);
else {
- msg->data[sizeof(msg->data) - 1] = '\0';
+ msg->data[EC_HOST_PARAM_SIZE - 1] = '\0';
count += scnprintf(buf + count, PAGE_SIZE - count,
"Build info: %s\n", msg->data);
}
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index 0dfa1ca0d05b..313cf8ad77bf 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -300,7 +300,7 @@ mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
{
struct mlxreg_core_data *data = item->data;
u32 regval;
- int i, ret;
+ int i, ret = 0;
for (i = 0; i < item->count; i++, data++) {
/* Mask event. */
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 27de29961f5e..454e14f02285 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -77,10 +77,13 @@
#define MLXPLAT_CPLD_AGGR_FAN_MASK_DEF 0x40
#define MLXPLAT_CPLD_AGGR_MASK_DEF (MLXPLAT_CPLD_AGGR_PSU_MASK_DEF | \
MLXPLAT_CPLD_AGGR_FAN_MASK_DEF)
+#define MLXPLAT_CPLD_AGGR_MASK_NG_DEF 0x04
+#define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc0
#define MLXPLAT_CPLD_AGGR_MASK_MSN21XX 0x04
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0)
+#define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(5, 0)
/* Start channel numbers */
#define MLXPLAT_CPLD_CH1 2
@@ -89,6 +92,15 @@
/* Number of LPC attached MUX platform devices */
#define MLXPLAT_CPLD_LPC_MUX_DEVS 2
+/* Hotplug devices adapter numbers */
+#define MLXPLAT_CPLD_NR_NONE -1
+#define MLXPLAT_CPLD_PSU_DEFAULT_NR 10
+#define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4
+#define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11
+#define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12
+#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13
+#define MLXPLAT_CPLD_FAN4_DEFAULT_NR 14
+
/* mlxplat_priv - platform private data
* @pdev_i2c - i2c controller platform device
* @pdev_mux - array of mux platform devices
@@ -159,6 +171,15 @@ static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
},
};
+static struct i2c_board_info mlxplat_mlxcpld_ng_psu[] = {
+ {
+ I2C_BOARD_INFO("24c32", 0x51),
+ },
+ {
+ I2C_BOARD_INFO("24c32", 0x50),
+ },
+};
+
static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
{
I2C_BOARD_INFO("dps460", 0x59),
@@ -190,14 +211,14 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = {
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_psu[0],
- .hpdev.nr = 10,
+ .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
},
{
.label = "psu2",
.reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_psu[1],
- .hpdev.nr = 10,
+ .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
},
};
@@ -207,14 +228,14 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_pwr_items_data[] = {
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
- .hpdev.nr = 10,
+ .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
},
{
.label = "pwr2",
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
- .hpdev.nr = 10,
+ .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
},
};
@@ -224,28 +245,28 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_items_data[] = {
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &mlxplat_mlxcpld_fan[0],
- .hpdev.nr = 11,
+ .hpdev.nr = MLXPLAT_CPLD_FAN1_DEFAULT_NR,
},
{
.label = "fan2",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &mlxplat_mlxcpld_fan[1],
- .hpdev.nr = 12,
+ .hpdev.nr = MLXPLAT_CPLD_FAN2_DEFAULT_NR,
},
{
.label = "fan3",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(2),
.hpdev.brdinfo = &mlxplat_mlxcpld_fan[2],
- .hpdev.nr = 13,
+ .hpdev.nr = MLXPLAT_CPLD_FAN3_DEFAULT_NR,
},
{
.label = "fan4",
.reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
.mask = BIT(3),
.hpdev.brdinfo = &mlxplat_mlxcpld_fan[3],
- .hpdev.nr = 14,
+ .hpdev.nr = MLXPLAT_CPLD_FAN4_DEFAULT_NR,
},
};
@@ -287,14 +308,29 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
};
+static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
/* Platform hotplug MSN21xx system family data */
static struct mlxreg_core_item mlxplat_mlxcpld_msn21xx_items[] = {
{
- .data = mlxplat_mlxcpld_default_pwr_items_data,
+ .data = mlxplat_mlxcpld_msn21xx_pwr_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF,
.reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
.mask = MLXPLAT_CPLD_PWR_MASK,
- .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr),
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_pwr_items_data),
.inversed = 0,
.health = false,
},
@@ -306,6 +342,245 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn21xx_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+/* Platform hotplug msn274x system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_psu[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_fan_items_data[] = {
+ {
+ .label = "fan1",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan2",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan3",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(2),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan4",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(3),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_msn274x_items[] = {
+ {
+ .data = mlxplat_mlxcpld_msn274x_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_msn274x_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn274x_data = {
+ .items = mlxplat_mlxcpld_msn274x_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn274x_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+/* Platform hotplug MSN201x system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_msn201x_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_msn201x_items[] = {
+ {
+ .data = mlxplat_mlxcpld_msn201x_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_msn201x_data = {
+ .items = mlxplat_mlxcpld_msn201x_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_msn201x_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+/* Platform hotplug next generation system family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_psu_items_data[] = {
+ {
+ .label = "psu1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "psu2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_ng_psu[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_fan_items_data[] = {
+ {
+ .label = "fan1",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan2",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan3",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(2),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan4",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(3),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan5",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(4),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "fan6",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(5),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_default_ng_items[] = {
+ {
+ .data = mlxplat_mlxcpld_default_ng_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = {
+ .items = mlxplat_mlxcpld_default_ng_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
@@ -437,8 +712,57 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
return 1;
};
+static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_msn274x_data;
+
+ return 1;
+};
+
+static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_msn201x_data;
+
+ return 1;
+};
+
+static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
+ mlxplat_mux_data[i].values = mlxplat_msn21xx_channels;
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_msn21xx_channels);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_default_ng_data;
+
+ return 1;
+};
+
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
{
+ .callback = mlxplat_dmi_msn274x_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN274"),
+ },
+ },
+ {
.callback = mlxplat_dmi_default_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
@@ -473,6 +797,34 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "MSN21"),
},
},
+ {
+ .callback = mlxplat_dmi_msn201x_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN201"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_qmb7xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "QMB7"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_qmb7xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SN37"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_qmb7xx_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "SN34"),
+ },
+ },
{ }
};
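
Note on the hunks above: all of the new board entries key platform-data selection off DMI board vendor plus a product-name fragment, with the first matching entry's callback installing the right hotplug data and mux channels. A compact standalone model of that table-driven selection, using strstr() to mirror DMI_MATCH()'s substring semantics:

#include <stdio.h>
#include <string.h>

struct dmi_id {
	const char *product;		/* product-name fragment */
	void (*callback)(void);		/* installs the platform data */
};

static void use_msn274x(void)    { printf("msn274x hotplug data\n"); }
static void use_default_ng(void) { printf("next-gen hotplug data\n"); }

static const struct dmi_id dmi_table[] = {
	{ "MSN274", use_msn274x },
	{ "QMB7",   use_default_ng },
	{ "SN37",   use_default_ng },
	{ "SN34",   use_default_ng },
	{ NULL,     NULL }		/* sentinel */
};

int main(void)
{
	const char *product_name = "QMB700";	/* pretend DMI value */
	const struct dmi_id *id;

	for (id = dmi_table; id->product; id++) {
		if (strstr(product_name, id->product)) {
			id->callback();		/* first match wins */
			return 0;
		}
	}
	printf("unsupported system\n");
	return 1;
}
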
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 614b44e70a28..a2b33a22c82a 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -19,6 +19,8 @@ endif
CFLAGS_sclp_early_core.o += -D__NO_FORTIFY
+CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
+
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o sclp_early_core.o
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index d06bc5674e5f..6b1891539c84 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -49,7 +49,7 @@ struct read_info_sccb {
u8 _pad_112[116 - 112]; /* 112-115 */
u8 fac116; /* 116 */
u8 fac117; /* 117 */
- u8 _pad_118; /* 118 */
+ u8 fac118; /* 118 */
u8 fac119; /* 119 */
u16 hcpua; /* 120-121 */
u8 _pad_122[124 - 122]; /* 122-123 */
@@ -100,6 +100,7 @@ static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
sclp.has_esca = !!(sccb->fac116 & 0x08);
sclp.has_pfmfi = !!(sccb->fac117 & 0x40);
sclp.has_ibs = !!(sccb->fac117 & 0x20);
+ sclp.has_gisaf = !!(sccb->fac118 & 0x08);
sclp.has_hvs = !!(sccb->fac119 & 0x80);
sclp.has_kss = !!(sccb->fac98 & 0x01);
if (sccb->fac85 & 0x02)
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 5c94a3aec4dd..f95b452b8bbc 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -412,7 +412,7 @@ static void chp_release(struct device *dev)
/**
* chp_update_desc - update channel-path description
- * @chp - channel-path
+ * @chp: channel-path
*
* Update the channel-path description of the specified channel-path
* including channel measurement related information.
@@ -438,7 +438,7 @@ int chp_update_desc(struct channel_path *chp)
/**
* chp_new - register a new channel-path
- * @chpid - channel-path ID
+ * @chpid: channel-path ID
*
* Create and register data structure representing new channel-path. Return
* zero on success, non-zero otherwise.
@@ -730,8 +730,8 @@ static void cfg_func(struct work_struct *work)
/**
* chp_cfg_schedule - schedule chpid configuration request
- * @chpid - channel-path ID
- * @configure - Non-zero for configure, zero for deconfigure
+ * @chpid: channel-path ID
+ * @configure: Non-zero for configure, zero for deconfigure
*
* Schedule a channel-path configuration/deconfiguration request.
*/
@@ -747,7 +747,7 @@ void chp_cfg_schedule(struct chp_id chpid, int configure)
/**
* chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
- * @chpid - channel-path ID
+ * @chpid: channel-path ID
*
* Cancel an active channel-path deconfiguration request if it has not yet
* been performed.
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 987bf9a8c9f7..6886b3d34cf8 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1059,7 +1059,7 @@ EXPORT_SYMBOL_GPL(cio_tm_start_key);
/**
* cio_tm_intrg - perform interrogate function
- * @sch - subchannel on which to perform the interrogate function
+ * @sch: subchannel on which to perform the interrogate function
*
* If the specified subchannel is running in transport-mode, perform the
* interrogate function. Return zero on success, non-zero otherwise.
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 5e495c62cfa7..8af4948dae80 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1118,9 +1118,10 @@ int ccw_set_cmf(struct ccw_device *cdev, int enable)
* enable_cmf() - switch on the channel measurement for a specific device
* @cdev: The ccw device to be enabled
*
- * Returns %0 for success or a negative error value.
- * Note: If this is called on a device for which channel measurement is already
- * enabled a reset of the measurement data is triggered.
+ * Enable channel measurements for @cdev. If this is called on a device
+ * for which channel measurement is already enabled a reset of the
+ * measurement data is triggered.
+ * Returns: %0 for success or a negative error value.
* Context:
* non-atomic
*/
@@ -1160,7 +1161,7 @@ out_unlock:
* __disable_cmf() - switch off the channel measurement for a specific device
* @cdev: The ccw device to be disabled
*
- * Returns %0 for success or a negative error value.
+ * Returns: %0 for success or a negative error value.
*
* Context:
* non-atomic, device_lock() held.
@@ -1184,7 +1185,7 @@ int __disable_cmf(struct ccw_device *cdev)
* disable_cmf() - switch off the channel measurement for a specific device
* @cdev: The ccw device to be disabled
*
- * Returns %0 for success or a negative error value.
+ * Returns: %0 for success or a negative error value.
*
* Context:
* non-atomic
@@ -1205,7 +1206,7 @@ int disable_cmf(struct ccw_device *cdev)
* @cdev: the channel to be read
* @index: the index of the value to be read
*
- * Returns the value read or %0 if the value cannot be read.
+ * Returns: The value read or %0 if the value cannot be read.
*
* Context:
* any
@@ -1220,7 +1221,7 @@ u64 cmf_read(struct ccw_device *cdev, int index)
* @cdev: the channel to be read
* @data: a pointer to a data block that will be filled
*
- * Returns %0 on success, a negative error value otherwise.
+ * Returns: %0 on success, a negative error value otherwise.
*
* Context:
* any
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
index deaf59f93326..19e46363348c 100644
--- a/drivers/s390/cio/itcw.c
+++ b/drivers/s390/cio/itcw.c
@@ -15,7 +15,7 @@
#include <asm/fcx.h>
#include <asm/itcw.h>
-/**
+/*
* struct itcw - incremental tcw helper data type
*
* This structure serves as a handle for the incremental construction of a
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 95b0efe28afb..d5b02de02a3a 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -72,6 +72,7 @@ static inline int do_siga_input(unsigned long schid, unsigned int mask,
* @mask: which output queues to process
* @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
* @fc: function code to perform
+ * @aob: asynchronous operation block
*
* Returns condition code.
* Note: For IQDC unicast queues only the highest priority queue is processed.
@@ -1761,9 +1762,6 @@ EXPORT_SYMBOL(qdio_stop_irq);
* @response: Response code will be stored at this address
* @cb: Callback function will be executed for each element
* of the address list
- * @priv: Pointer passed from the caller to qdio_pnso_brinfo()
- * @type: Type of the address entry passed to the callback
- * @entry: Entry containg the address of the specified type
* @priv: Pointer to pass to the callback function.
*
* Performs "Store-network-bridging-information list" operation and calls
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index d9a2fffd034b..2c7550797ec2 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -835,7 +835,7 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
/**
* cp_iova_pinned() - check if an iova is pinned for a ccw chain.
- * @cmd: ccwchain command on which to perform the operation
+ * @cp: channel_program on which to perform the operation
* @iova: the iova to check
*
* If the @iova is currently pinned for the ccw chain, return true;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index db42107bf2f5..959c65cf75d9 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -591,6 +591,11 @@ struct qeth_cmd_buffer {
void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
};
+static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
+{
+ return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
+}
+
/**
* definition of a qeth channel, used for read and write
*/
@@ -846,7 +851,7 @@ struct qeth_trap_id {
*/
static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
{
- return PFN_UP(end - 1) - PFN_DOWN(start);
+ return PFN_UP(end) - PFN_DOWN(start);
}
static inline int qeth_get_micros(void)
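
Note on the hunk above: with an exclusive end address, the number of pages that [start, end) touches is PFN_UP(end) - PFN_DOWN(start); the old PFN_UP(end - 1) returned one buffer element too few whenever the last byte of the range was the first byte of a new page. A standalone check of both formulas on exactly that case:

#include <stdio.h>

#define PAGE_SIZE   4096UL
#define PFN_DOWN(x) ((x) / PAGE_SIZE)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

int main(void)
{
	/* one byte sitting exactly on a page boundary:
	 * [4096, 4097) touches one page */
	unsigned long start = 4096, end = start + 1;

	printf("old: %lu element(s)\n",
	       PFN_UP(end - 1) - PFN_DOWN(start));	/* prints 0 */
	printf("new: %lu element(s)\n",
	       PFN_UP(end) - PFN_DOWN(start));		/* prints 1 */
	return 0;
}
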
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 6abd3bc285e4..ca72f3311004 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2120,7 +2120,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
unsigned long flags;
struct qeth_reply *reply = NULL;
unsigned long timeout, event_timeout;
- struct qeth_ipa_cmd *cmd;
+ struct qeth_ipa_cmd *cmd = NULL;
QETH_CARD_TEXT(card, 2, "sendctl");
@@ -2146,10 +2146,13 @@ int qeth_send_control_data(struct qeth_card *card, int len,
while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
qeth_prepare_control_data(card, len, iob);
- if (IS_IPA(iob->data))
+ if (IS_IPA(iob->data)) {
+ cmd = __ipa_cmd(iob);
event_timeout = QETH_IPA_TIMEOUT;
- else
+ } else {
event_timeout = QETH_TIMEOUT;
+ }
+
timeout = jiffies + event_timeout;
QETH_CARD_TEXT(card, 6, "noirqpnd");
@@ -2174,9 +2177,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
/* we have only one long running ipassist, since we can ensure
process context of this command we can sleep */
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- if ((cmd->hdr.command == IPA_CMD_SETIP) &&
- (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
+ if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
+ cmd->hdr.prot_version == QETH_PROT_IPV4) {
if (!wait_event_timeout(reply->wait_q,
atomic_read(&reply->received), event_timeout))
goto time_err;
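
Note on the hunks above: qeth_send_control_data() now captures the IPA command pointer while the buffer is being prepared and dereferences it after completion only when the buffer really carried an IPA command; the old code cast iob->data unconditionally, including on the plain-command path. The guard reduces to (standalone model, opcode value illustrative):

#include <stdio.h>
#include <stddef.h>

struct qeth_ipa_cmd_model { int command; };

#define IPA_CMD_SETIP 0xb1	/* illustrative stand-in for the opcode */

static void send_control_data(int is_ipa)
{
	struct qeth_ipa_cmd_model buf = { .command = IPA_CMD_SETIP };
	/* capture the pointer before the I/O, NULL on the non-IPA path */
	struct qeth_ipa_cmd_model *cmd = is_ipa ? &buf : NULL;

	/* ... channel I/O and wait happen here ... */

	/* only the IPA path may look at the command header afterwards */
	if (cmd && cmd->command == IPA_CMD_SETIP)
		printf("SETIP: use the long IPA timeout\n");
	else
		printf("plain command: default timeout\n");
}

int main(void)
{
	send_control_data(1);
	send_control_data(0);
	return 0;
}
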
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 92863e3818e5..9475353f49d6 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -197,10 +197,11 @@ int clk_rate_table_find(struct clk *clk,
unsigned long rate)
{
struct cpufreq_frequency_table *pos;
+ int idx;
- cpufreq_for_each_valid_entry(pos, freq_table)
+ cpufreq_for_each_valid_entry_idx(pos, freq_table, idx)
if (pos->frequency == rate)
- return pos - freq_table;
+ return idx;
return -ENOENT;
}
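
Note on the hunk above: clk_rate_table_find() needs the index of the matching frequency-table entry, and cpufreq_for_each_valid_entry_idx() keeps an index in step with the cursor instead of recovering it afterwards by pointer subtraction, which is what both this and the sh_sir change below switch to. Roughly equivalent standalone logic (sentinel values are stand-ins for CPUFREQ_ENTRY_INVALID and CPUFREQ_TABLE_END):

#include <stdio.h>

struct entry { unsigned int frequency; };

#define ENTRY_INVALID 0xFFFFFFFFu	/* stand-in for CPUFREQ_ENTRY_INVALID */
#define TABLE_END     0u		/* stand-in for CPUFREQ_TABLE_END */

/* skip invalid rows but count every row, keeping idx in step with the
 * cursor the way cpufreq_for_each_valid_entry_idx() does */
static int find_rate(const struct entry *table, unsigned int rate)
{
	int idx;

	for (idx = 0; table[idx].frequency != TABLE_END; idx++) {
		if (table[idx].frequency == ENTRY_INVALID)
			continue;
		if (table[idx].frequency == rate)
			return idx;
	}
	return -1;	/* the kernel version returns -ENOENT */
}

int main(void)
{
	const struct entry table[] = {
		{ 100000 }, { ENTRY_INVALID }, { 200000 }, { TABLE_END }
	};

	printf("200000 kHz is entry %d\n", find_rate(table, 200000));
	return 0;
}
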
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index ee18428a051f..b3f5cae98ea6 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -31,7 +31,7 @@ config SSB_BLOCKIO
config SSB_PCIHOST_POSSIBLE
bool
- depends on SSB && (PCI = y || PCI = SSB) && PCI_DRIVERS_LEGACY
+ depends on SSB && (PCI = y || PCI = SSB) && (PCI_DRIVERS_LEGACY || !MIPS)
default y
config SSB_PCIHOST
diff --git a/drivers/staging/irda/drivers/sh_sir.c b/drivers/staging/irda/drivers/sh_sir.c
index fede6864c737..0d0687cc454a 100644
--- a/drivers/staging/irda/drivers/sh_sir.c
+++ b/drivers/staging/irda/drivers/sh_sir.c
@@ -226,7 +226,7 @@ static u32 sh_sir_find_sclk(struct clk *irda_clk)
clk_put(pclk);
/* IrDA can not set over peripheral_clk */
- cpufreq_for_each_valid_entry(pos, freq_table) {
+ cpufreq_for_each_valid_entry_idx(pos, freq_table, index) {
u32 freq = pos->frequency;
/* IrDA should not over peripheral_clk */
@@ -236,7 +236,7 @@ static u32 sh_sir_find_sclk(struct clk *irda_clk)
tmp = freq % SCLK_BASE;
if (tmp < min) {
min = tmp;
- index = pos - freq_table;
+ break;
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/Kconfig b/drivers/staging/rtl8192e/rtl8192e/Kconfig
index 282e293da18f..7ac42a590e21 100644
--- a/drivers/staging/rtl8192e/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/rtl8192e/Kconfig
@@ -6,4 +6,3 @@ config RTL8192E
select WEXT_PRIV
select CRYPTO
select FW_LOADER
- ---help---
diff --git a/drivers/staging/rtl8192u/Kconfig b/drivers/staging/rtl8192u/Kconfig
index 3ee9d0d00fb6..97df6507a485 100644
--- a/drivers/staging/rtl8192u/Kconfig
+++ b/drivers/staging/rtl8192u/Kconfig
@@ -5,4 +5,3 @@ config RTL8192U
select WIRELESS_EXT
select WEXT_PRIV
select CRYPTO
- ---help---
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 92eb57e2adaf..8de16016b6de 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -893,6 +893,9 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
return -ENODEV;
rcu_read_lock();
+ if (!(n->nud_state & NUD_VALID))
+ neigh_event_send(n, NULL);
+
ret = -ENOMEM;
if (n->dev->flags & IFF_LOOPBACK) {
if (iptype == 4)
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index f9bc8ec6fb6b..9518ffd8b8ba 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -421,7 +421,8 @@ static int chap_server_compute_md5(
auth_ret = 0;
out:
kzfree(desc);
- crypto_free_shash(tfm);
+ if (tfm)
+ crypto_free_shash(tfm);
kfree(challenge);
kfree(challenge_binhex);
return auth_ret;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index b686e2ce9c0e..8a5e8d17a942 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -432,6 +432,9 @@ static void iscsi_target_sk_data_ready(struct sock *sk)
if (test_and_set_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
write_unlock_bh(&sk->sk_callback_lock);
pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1, conn: %p >>>>\n", conn);
+ if (iscsi_target_sk_data_ready == conn->orig_data_ready)
+ return;
+ conn->orig_data_ready(sk);
return;
}
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index b6a913e38b30..9cd4ffe76c07 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -64,7 +64,7 @@ static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
- seq_printf(m, "tcm_loop_proc_info()\n");
+ seq_puts(m, "tcm_loop_proc_info()\n");
return 0;
}
@@ -123,8 +123,8 @@ static void tcm_loop_submission_work(struct work_struct *work)
}
tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus) {
- scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
- " does not exist\n");
+ scmd_printk(KERN_ERR, sc,
+ "TCM_Loop I_T Nexus does not exist\n");
set_host_byte(sc, DID_ERROR);
goto out_done;
}
@@ -166,7 +166,6 @@ static void tcm_loop_submission_work(struct work_struct *work)
out_done:
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
sc->scsi_done(sc);
- return;
}
/*
@@ -177,14 +176,13 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
struct tcm_loop_cmd *tl_cmd;
- pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
- " scsi_buf_len: %u\n", sc->device->host->host_no,
- sc->device->id, sc->device->channel, sc->device->lun,
- sc->cmnd[0], scsi_bufflen(sc));
+ pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
+ __func__, sc->device->host->host_no, sc->device->id,
+ sc->device->channel, sc->device->lun, sc->cmnd[0],
+ scsi_bufflen(sc));
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
if (!tl_cmd) {
- pr_err("Unable to allocate struct tcm_loop_cmd\n");
set_host_byte(sc, DID_ERROR);
sc->scsi_done(sc);
return 0;
@@ -204,10 +202,10 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
u64 lun, int task, enum tcm_tmreq_table tmr)
{
- struct se_cmd *se_cmd = NULL;
+ struct se_cmd *se_cmd;
struct se_session *se_sess;
struct tcm_loop_nexus *tl_nexus;
- struct tcm_loop_cmd *tl_cmd = NULL;
+ struct tcm_loop_cmd *tl_cmd;
int ret = TMR_FUNCTION_FAILED, rc;
/*
@@ -215,16 +213,13 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
*/
tl_nexus = tl_tpg->tl_nexus;
if (!tl_nexus) {
- pr_err("Unable to perform device reset without"
- " active I_T Nexus\n");
+ pr_err("Unable to perform device reset without active I_T Nexus\n");
return ret;
}
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
- if (!tl_cmd) {
- pr_err("Unable to allocate memory for tl_cmd\n");
+ if (!tl_cmd)
return ret;
- }
init_completion(&tl_cmd->tmr_done);
@@ -298,8 +293,7 @@ static int tcm_loop_target_reset(struct scsi_cmnd *sc)
*/
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
if (!tl_hba) {
- pr_err("Unable to perform device reset without"
- " active I_T Nexus\n");
+ pr_err("Unable to perform device reset without active I_T Nexus\n");
return FAILED;
}
/*
@@ -417,8 +411,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
ret = device_register(&tl_hba->dev);
if (ret) {
- pr_err("device_register() failed for"
- " tl_hba->dev: %d\n", ret);
+ pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
return -ENODEV;
}
@@ -447,8 +440,7 @@ static int tcm_loop_alloc_core_bus(void)
ret = driver_register(&tcm_loop_driverfs);
if (ret) {
- pr_err("driver_register() failed for"
- "tcm_loop_driverfs\n");
+ pr_err("driver_register() failed for tcm_loop_driverfs\n");
goto bus_unreg;
}
@@ -587,8 +579,8 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
- pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
- " cdb: 0x%02x\n", sc, sc->cmnd[0]);
+ pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
+ __func__, sc, sc->cmnd[0]);
sc->result = SAM_STAT_GOOD;
set_host_byte(sc, DID_OK);
@@ -605,8 +597,8 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
- pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
- " cdb: 0x%02x\n", sc, sc->cmnd[0]);
+ pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
+ __func__, sc, sc->cmnd[0]);
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
@@ -691,8 +683,8 @@ static void tcm_loop_port_unlink(
sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
se_lun->unpacked_lun);
if (!sd) {
- pr_err("Unable to locate struct scsi_device for %d:%d:"
- "%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
+ pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
+ 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
return;
}
/*
@@ -772,11 +764,9 @@ static int tcm_loop_make_nexus(
return -EEXIST;
}
- tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
- if (!tl_nexus) {
- pr_err("Unable to allocate struct tcm_loop_nexus\n");
+ tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
+ if (!tl_nexus)
return -ENOMEM;
- }
tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
@@ -787,9 +777,8 @@ static int tcm_loop_make_nexus(
return ret;
}
- pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
- " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
- name);
+ pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
+ tcm_loop_dump_proto_id(tl_hba), name);
return 0;
}
@@ -808,15 +797,14 @@ static int tcm_loop_drop_nexus(
return -ENODEV;
if (atomic_read(&tpg->tl_tpg_port_count)) {
- pr_err("Unable to remove TCM_Loop I_T Nexus with"
- " active TPG port count: %d\n",
- atomic_read(&tpg->tl_tpg_port_count));
+ pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
+ atomic_read(&tpg->tl_tpg_port_count));
return -EPERM;
}
- pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
- " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
- tl_nexus->se_sess->se_node_acl->initiatorname);
+ pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
+ tcm_loop_dump_proto_id(tpg->tl_hba),
+ tl_nexus->se_sess->se_node_acl->initiatorname);
/*
* Release the SCSI I_T Nexus to the emulated Target Port
*/
@@ -868,8 +856,8 @@ static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
* tcm_loop_make_nexus()
*/
if (strlen(page) >= TL_WWN_ADDR_LEN) {
- pr_err("Emulated NAA Sas Address: %s, exceeds"
- " max: %d\n", page, TL_WWN_ADDR_LEN);
+ pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n",
+ page, TL_WWN_ADDR_LEN);
return -EINVAL;
}
snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);
@@ -877,9 +865,8 @@ static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
ptr = strstr(i_port, "naa.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
- pr_err("Passed SAS Initiator Port %s does not"
- " match target port protoid: %s\n", i_port,
- tcm_loop_dump_proto_id(tl_hba));
+ pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
+ i_port, tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
}
port_ptr = &i_port[0];
@@ -888,9 +875,8 @@ static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
ptr = strstr(i_port, "fc.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
- pr_err("Passed FCP Initiator Port %s does not"
- " match target port protoid: %s\n", i_port,
- tcm_loop_dump_proto_id(tl_hba));
+ pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
+ i_port, tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
}
port_ptr = &i_port[3]; /* Skip over "fc." */
@@ -899,16 +885,15 @@ static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
ptr = strstr(i_port, "iqn.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
- pr_err("Passed iSCSI Initiator Port %s does not"
- " match target port protoid: %s\n", i_port,
- tcm_loop_dump_proto_id(tl_hba));
+ pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
+ i_port, tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
}
port_ptr = &i_port[0];
goto check_newline;
}
- pr_err("Unable to locate prefix for emulated Initiator Port:"
- " %s\n", i_port);
+ pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
+ i_port);
return -EINVAL;
/*
* Clear any trailing newline for the NAA WWN
@@ -1010,16 +995,15 @@ static struct se_portal_group *tcm_loop_make_naa_tpg(
unsigned long tpgt;
if (strstr(name, "tpgt_") != name) {
- pr_err("Unable to locate \"tpgt_#\" directory"
- " group\n");
+ pr_err("Unable to locate \"tpgt_#\" directory group\n");
return ERR_PTR(-EINVAL);
}
if (kstrtoul(name+5, 10, &tpgt))
return ERR_PTR(-EINVAL);
if (tpgt >= TL_TPGS_PER_HBA) {
- pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
- " %u\n", tpgt, TL_TPGS_PER_HBA);
+ pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
+ tpgt, TL_TPGS_PER_HBA);
return ERR_PTR(-EINVAL);
}
tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
@@ -1032,10 +1016,9 @@ static struct se_portal_group *tcm_loop_make_naa_tpg(
if (ret < 0)
return ERR_PTR(-ENOMEM);
- pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
- " Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
- config_item_name(&wwn->wwn_group.cg_item), tpgt);
-
+ pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
+ tcm_loop_dump_proto_id(tl_hba),
+ config_item_name(&wwn->wwn_group.cg_item), tpgt);
return &tl_tpg->tl_se_tpg;
}
@@ -1062,9 +1045,9 @@ static void tcm_loop_drop_naa_tpg(
tl_tpg->tl_hba = NULL;
tl_tpg->tl_tpgt = 0;
- pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
- " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
- config_item_name(&wwn->wwn_group.cg_item), tpgt);
+ pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
+ tcm_loop_dump_proto_id(tl_hba),
+ config_item_name(&wwn->wwn_group.cg_item), tpgt);
}
/* End items for tcm_loop_naa_cit */
@@ -1081,11 +1064,10 @@ static struct se_wwn *tcm_loop_make_scsi_hba(
char *ptr;
int ret, off = 0;
- tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
- if (!tl_hba) {
- pr_err("Unable to allocate struct tcm_loop_hba\n");
+ tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
+ if (!tl_hba)
return ERR_PTR(-ENOMEM);
- }
+
/*
* Determine the emulated Protocol Identifier and Target Port Name
* based on the incoming configfs directory name.
@@ -1103,8 +1085,8 @@ static struct se_wwn *tcm_loop_make_scsi_hba(
}
ptr = strstr(name, "iqn.");
if (!ptr) {
- pr_err("Unable to locate prefix for emulated Target "
- "Port: %s\n", name);
+ pr_err("Unable to locate prefix for emulated Target Port: %s\n",
+ name);
ret = -EINVAL;
goto out;
}
@@ -1112,9 +1094,8 @@ static struct se_wwn *tcm_loop_make_scsi_hba(
check_len:
if (strlen(name) >= TL_WWN_ADDR_LEN) {
- pr_err("Emulated NAA %s Address: %s, exceeds"
- " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
- TL_WWN_ADDR_LEN);
+ pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
+		       tcm_loop_dump_proto_id(tl_hba), name, TL_WWN_ADDR_LEN);
ret = -EINVAL;
goto out;
}
@@ -1131,10 +1112,8 @@ check_len:
sh = tl_hba->sh;
tcm_loop_hba_no_cnt++;
- pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
- " %s Address: %s at Linux/SCSI Host ID: %d\n",
- tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
-
+ pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
+ tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
return &tl_hba->tl_hba_wwn;
out:
kfree(tl_hba);
@@ -1147,10 +1126,9 @@ static void tcm_loop_drop_scsi_hba(
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
- pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
- " %s Address: %s at Linux/SCSI Host ID: %d\n",
- tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
- tl_hba->sh->host_no);
+ pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
+ tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
+ tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
@@ -1223,8 +1201,7 @@ static int __init tcm_loop_fabric_init(void)
__alignof__(struct tcm_loop_cmd),
0, NULL);
if (!tcm_loop_cmd_cache) {
- pr_debug("kmem_cache_create() for"
- " tcm_loop_cmd_cache failed\n");
+ pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
goto out_destroy_workqueue;
}
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index e5c3e5f827d0..fb1003921d85 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -201,10 +201,9 @@ static struct sbp_session *sbp_session_create(
snprintf(guid_str, sizeof(guid_str), "%016llx", guid);
sess = kmalloc(sizeof(*sess), GFP_KERNEL);
- if (!sess) {
- pr_err("failed to allocate session descriptor\n");
+ if (!sess)
return ERR_PTR(-ENOMEM);
- }
+
spin_lock_init(&sess->lock);
INIT_LIST_HEAD(&sess->login_list);
INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
@@ -2029,10 +2028,8 @@ static struct se_portal_group *sbp_make_tpg(
}
tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
- if (!tpg) {
- pr_err("Unable to allocate struct sbp_tpg\n");
+ if (!tpg)
return ERR_PTR(-ENOMEM);
- }
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
@@ -2088,10 +2085,8 @@ static struct se_wwn *sbp_make_tport(
return ERR_PTR(-EINVAL);
tport = kzalloc(sizeof(*tport), GFP_KERNEL);
- if (!tport) {
- pr_err("Unable to allocate struct sbp_tport\n");
+ if (!tport)
return ERR_PTR(-ENOMEM);
- }
tport->guid = guid;
sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 72b1cd1bf9d9..3f4bf126eed0 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1197,6 +1197,7 @@ struct configfs_attribute *passthrough_attrib_attrs[] = {
EXPORT_SYMBOL(passthrough_attrib_attrs);
TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
+TB_CIT_SETUP_DRV(dev_action, NULL, NULL);
/* End functions for struct config_item_type tb_dev_attrib_cit */
@@ -2940,6 +2941,10 @@ static struct config_group *target_core_make_subdev(
config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);
+ config_group_init_type_name(&dev->dev_action_group, "action",
+ &tb->tb_dev_action_cit);
+ configfs_add_default_group(&dev->dev_action_group, &dev->dev_group);
+
config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
&tb->tb_dev_attrib_cit);
configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);
@@ -3200,6 +3205,7 @@ static const struct config_item_type target_core_cit = {
void target_setup_backend_cits(struct target_backend *tb)
{
target_core_setup_dev_cit(tb);
+ target_core_setup_dev_action_cit(tb);
target_core_setup_dev_attrib_cit(tb);
target_core_setup_dev_pr_cit(tb);
target_core_setup_dev_wwn_cit(tb);
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index e8dd6da164b2..e27db4d45a9d 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -997,7 +997,7 @@ int target_configure_device(struct se_device *dev)
ret = core_setup_alua(dev);
if (ret)
- goto out_free_index;
+ goto out_destroy_device;
/*
* Startup the struct se_device processing thread
@@ -1041,6 +1041,8 @@ int target_configure_device(struct se_device *dev)
out_free_alua:
core_alua_free_lu_gp_mem(dev);
+out_destroy_device:
+ dev->transport->destroy_device(dev);
out_free_index:
mutex_lock(&device_mutex);
idr_remove(&devices_idr, dev->dev_index);
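The relabeled error path above restores the usual unwind ordering: once dev->transport->configure_device() has succeeded, a later core_setup_alua() failure must tear the device back down before the index is released. A standalone sketch of that pattern, with hypothetical step/undo helpers:

#include <stdio.h>

static int step_configure(void) { return 0; }	/* succeeds */
static int step_alua(void) { return -1; }	/* fails */
static void undo_configure(void) { puts("destroy_device"); }
static void undo_index(void) { puts("remove index"); }

static int configure(void)
{
	if (step_configure())
		goto out_free_index;
	if (step_alua())
		goto out_destroy_device;	/* the fix: undo configure too */
	return 0;

out_destroy_device:
	undo_configure();
out_free_index:
	undo_index();
	return -1;
}

int main(void)
{
	return configure() ? 1 : 0;
}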
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 508da345b73f..71a80257a052 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -273,7 +273,7 @@ static int iscsi_get_pr_transport_id_len(
static char *iscsi_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
- const char *buf,
+ char *buf,
u32 *out_tid_len,
char **port_nexus_ptr)
{
@@ -356,7 +356,7 @@ static char *iscsi_parse_pr_out_transport_id(
}
}
- return (char *)&buf[4];
+ return &buf[4];
}
int target_get_pr_transport_id_len(struct se_node_acl *nacl,
@@ -405,7 +405,7 @@ int target_get_pr_transport_id(struct se_node_acl *nacl,
}
const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
- const char *buf, u32 *out_tid_len, char **port_nexus_ptr)
+ char *buf, u32 *out_tid_len, char **port_nexus_ptr)
{
u32 offset;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 9384d19a7326..1d5afc3ae017 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -17,6 +17,7 @@ struct target_backend {
struct config_item_type tb_dev_cit;
struct config_item_type tb_dev_attrib_cit;
+ struct config_item_type tb_dev_action_cit;
struct config_item_type tb_dev_pr_cit;
struct config_item_type tb_dev_wwn_cit;
struct config_item_type tb_dev_alua_tg_pt_gps_cit;
@@ -102,7 +103,7 @@ int target_get_pr_transport_id(struct se_node_acl *nacl,
struct t10_pr_registration *pr_reg, int *format_code,
unsigned char *buf);
const char *target_parse_pr_out_transport_id(struct se_portal_group *tpg,
- const char *buf, u32 *out_tid_len, char **port_nexus_ptr);
+ char *buf, u32 *out_tid_len, char **port_nexus_ptr);
/* target_core_hba.c */
struct se_hba *core_alloc_hba(const char *, u32, u32);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index b024613f9217..01ac306131c1 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1601,7 +1601,7 @@ core_scsi3_decode_spec_i_port(
dest_rtpi = tmp_lun->lun_rtpi;
i_str = target_parse_pr_out_transport_id(tmp_tpg,
- (const char *)ptr, &tid_len, &iport_ptr);
+ ptr, &tid_len, &iport_ptr);
if (!i_str)
continue;
@@ -3287,7 +3287,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
goto out;
}
initiator_str = target_parse_pr_out_transport_id(dest_se_tpg,
- (const char *)&buf[24], &tmp_tid_len, &iport_ptr);
+ &buf[24], &tmp_tid_len, &iport_ptr);
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 750a04ed0e93..b054682e974f 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1216,9 +1216,11 @@ sbc_execute_unmap(struct se_cmd *cmd)
goto err;
}
- ret = ops->execute_unmap(cmd, lba, range);
- if (ret)
- goto err;
+ if (range) {
+ ret = ops->execute_unmap(cmd, lba, range);
+ if (ret)
+ goto err;
+ }
ptr += 16;
size -= 16;
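The guard above makes sbc_execute_unmap() skip descriptors whose block count is zero instead of handing them to the backend; SBC treats a zero-length UNMAP descriptor as a no-op rather than an error. A standalone sketch of parsing one 16-byte UNMAP block descriptor (8-byte LBA, 4-byte count, both big-endian) with the same skip:

#include <stdint.h>
#include <stdio.h>

static uint64_t get_be64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int main(void)
{
	uint8_t desc[16] = { 0 };
	uint64_t lba;
	uint32_t range;

	desc[6] = 0x10;	/* LBA = 0x1000; count bytes 8-11 stay zero */

	lba = get_be64(&desc[0]);
	range = get_be32(&desc[8]);
	if (range)
		printf("unmap lba=%llu count=%u\n",
		       (unsigned long long)lba, range);
	else
		printf("zero-length descriptor skipped, not an error\n");
	return 0;
}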
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c03a78ee26cd..4558f2e1fe1b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1774,6 +1774,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_OUT_OF_RESOURCES:
cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
goto queue_status;
+ case TCM_LUN_BUSY:
+ cmd->scsi_status = SAM_STAT_BUSY;
+ goto queue_status;
case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index a415d87f22d2..4ad89ea71a70 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -32,7 +32,7 @@
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
-#include <linux/kthread.h>
+#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -77,15 +77,21 @@
* the total size is 256K * PAGE_SIZE.
*/
#define DATA_BLOCK_SIZE PAGE_SIZE
-#define DATA_BLOCK_BITS (256 * 1024)
+#define DATA_BLOCK_SHIFT PAGE_SHIFT
+#define DATA_BLOCK_BITS_DEF (256 * 1024)
#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
-#define DATA_BLOCK_INIT_BITS 128
+
+#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
+#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
/* The total size of the ring is 8M + 256K * PAGE_SIZE */
#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
-/* Default maximum of the global data blocks(512K * PAGE_SIZE) */
-#define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024)
+/*
+ * Default number of global data blocks (512K * PAGE_SIZE)
+ * at which the unmap thread will be started.
+ */
+#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
static u8 tcmu_kern_cmd_reply_supported;
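The two shift macros above convert between megabytes and ring data blocks. With the usual 4K pages (DATA_BLOCK_SHIFT == 12), one MB is 256 blocks, so the DATA_BLOCK_BITS_DEF default of 256K blocks is 1024 MB per device and TCMU_GLOBAL_MAX_BLOCKS_DEF is 2048 MB. A standalone check of that arithmetic, assuming 4K pages:

#include <assert.h>

#define DATA_BLOCK_SHIFT 12	/* assumed: PAGE_SHIFT on a 4K-page system */
#define MBS_TO_BLOCKS(_mbs)	((_mbs) << (20 - DATA_BLOCK_SHIFT))
#define BLOCKS_TO_MBS(_blocks)	((_blocks) >> (20 - DATA_BLOCK_SHIFT))

int main(void)
{
	assert(MBS_TO_BLOCKS(1) == 256);		/* 1 MB = 256 x 4K */
	assert(BLOCKS_TO_MBS(256 * 1024) == 1024);	/* per-device default */
	assert(BLOCKS_TO_MBS(512 * 1024) == 2048);	/* global default */
	return 0;
}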
@@ -107,6 +113,7 @@ struct tcmu_nl_cmd {
struct tcmu_dev {
struct list_head node;
struct kref kref;
+
struct se_device se_dev;
char *name;
@@ -114,6 +121,7 @@ struct tcmu_dev {
#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
+#define TCMU_DEV_BIT_BLOCKED 2
unsigned long flags;
struct uio_info uio_info;
@@ -128,22 +136,27 @@ struct tcmu_dev {
/* Must add data_off and mb_addr to get the address */
size_t data_off;
size_t data_size;
+ uint32_t max_blocks;
+ size_t ring_size;
- wait_queue_head_t wait_cmdr;
struct mutex cmdr_lock;
+ struct list_head cmdr_queue;
- bool waiting_global;
uint32_t dbi_max;
uint32_t dbi_thresh;
- DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
+ unsigned long *data_bitmap;
struct radix_tree_root data_blocks;
struct idr commands;
- spinlock_t commands_lock;
- struct timer_list timeout;
+ struct timer_list cmd_timer;
unsigned int cmd_time_out;
+ struct timer_list qfull_timer;
+ int qfull_time_out;
+
+ struct list_head timedout_entry;
+
spinlock_t nl_cmd_lock;
struct tcmu_nl_cmd curr_nl_cmd;
/* wake up threads waiting on curr_nl_cmd */
@@ -161,6 +174,7 @@ struct tcmu_dev {
struct tcmu_cmd {
struct se_cmd *se_cmd;
struct tcmu_dev *tcmu_dev;
+ struct list_head cmdr_queue_entry;
uint16_t cmd_id;
@@ -175,16 +189,68 @@ struct tcmu_cmd {
#define TCMU_CMD_BIT_EXPIRED 0
unsigned long flags;
};
-
-static struct task_struct *unmap_thread;
-static wait_queue_head_t unmap_wait;
+/*
+ * To avoid deadlock, the mutex lock order should always be:
+ *
+ * mutex_lock(&root_udev_mutex);
+ * ...
+ * mutex_lock(&tcmu_dev->cmdr_lock);
+ * mutex_unlock(&tcmu_dev->cmdr_lock);
+ * ...
+ * mutex_unlock(&root_udev_mutex);
+ */
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);
-static atomic_t global_db_count = ATOMIC_INIT(0);
+static DEFINE_SPINLOCK(timed_out_udevs_lock);
+static LIST_HEAD(timed_out_udevs);
static struct kmem_cache *tcmu_cmd_cache;
+static atomic_t global_db_count = ATOMIC_INIT(0);
+static struct delayed_work tcmu_unmap_work;
+static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;
+
+static int tcmu_set_global_max_data_area(const char *str,
+ const struct kernel_param *kp)
+{
+ int ret, max_area_mb;
+
+ ret = kstrtoint(str, 10, &max_area_mb);
+ if (ret)
+ return -EINVAL;
+
+ if (max_area_mb <= 0) {
+ pr_err("global_max_data_area must be larger than 0.\n");
+ return -EINVAL;
+ }
+
+ tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
+ if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
+ schedule_delayed_work(&tcmu_unmap_work, 0);
+ else
+ cancel_delayed_work_sync(&tcmu_unmap_work);
+
+ return 0;
+}
+
+static int tcmu_get_global_max_data_area(char *buffer,
+ const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+}
+
+static const struct kernel_param_ops tcmu_global_max_data_area_op = {
+ .set = tcmu_set_global_max_data_area,
+ .get = tcmu_get_global_max_data_area,
+};
+
+module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
+ S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(global_max_data_area_mb,
+		 "Max MBs allowed to be allocated to all the tcmu devices' "
+		 "data areas.");
+
/* multicast group */
enum tcmu_multicast_groups {
TCMU_MCGRP_CONFIG,
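Because the parameter is registered with S_IWUSR | S_IRUGO, the global limit can be adjusted at runtime. A hypothetical userspace sketch; the sysfs path is assumed from the module and parameter names:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/module/target_core_user/parameters/global_max_data_area_mb";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* raise the global limit to 2048 MB; the set callback converts
	 * this to blocks and may kick the unmap worker */
	if (write(fd, "2048", 4) != 4)
		perror("write");
	close(fd);
	return 0;
}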
@@ -345,10 +411,8 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
page = radix_tree_lookup(&udev->data_blocks, dbi);
if (!page) {
if (atomic_add_return(1, &global_db_count) >
- TCMU_GLOBAL_MAX_BLOCKS) {
- atomic_dec(&global_db_count);
- return false;
- }
+ tcmu_global_max_blocks)
+ schedule_delayed_work(&tcmu_unmap_work, 0);
/* try to get new page from the mm */
page = alloc_page(GFP_KERNEL);
@@ -379,19 +443,11 @@ static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
{
int i;
- udev->waiting_global = false;
-
for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
if (!tcmu_get_empty_block(udev, tcmu_cmd))
- goto err;
+ return false;
}
return true;
-
-err:
- udev->waiting_global = true;
- /* Try to wake up the unmap thread */
- wake_up(&unmap_wait);
- return false;
}
static inline struct page *
@@ -437,6 +493,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
if (!tcmu_cmd)
return NULL;
+ INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
@@ -455,12 +512,13 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
unsigned long offset = offset_in_page(vaddr);
+ void *start = vaddr - offset;
size = round_up(size+offset, PAGE_SIZE);
- vaddr -= offset;
while (size) {
- flush_dcache_page(virt_to_page(vaddr));
+ flush_dcache_page(virt_to_page(start));
+ start += PAGE_SIZE;
size -= PAGE_SIZE;
}
}
@@ -490,8 +548,7 @@ static inline size_t head_to_end(size_t head, size_t size)
return size - head;
}
-static inline void new_iov(struct iovec **iov, int *iov_cnt,
- struct tcmu_dev *udev)
+static inline void new_iov(struct iovec **iov, int *iov_cnt)
{
struct iovec *iovec;
@@ -518,7 +575,7 @@ static inline size_t iov_tail(struct iovec *iov)
return (size_t)iov->iov_base + iov->iov_len;
}
-static int scatter_data_area(struct tcmu_dev *udev,
+static void scatter_data_area(struct tcmu_dev *udev,
struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
unsigned int data_nents, struct iovec **iov,
int *iov_cnt, bool copy_data)
@@ -544,19 +601,38 @@ static int scatter_data_area(struct tcmu_dev *udev,
to = kmap_atomic(page);
}
- copy_bytes = min_t(size_t, sg_remaining,
- block_remaining);
+ /*
+			 * Convert to the virtual offset of the ring data area.
+ */
to_offset = get_block_offset_user(udev, dbi,
block_remaining);
+ /*
+ * The following code will gather and map the blocks
+ * to the same iovec when the blocks are all next to
+ * each other.
+ */
+ copy_bytes = min_t(size_t, sg_remaining,
+ block_remaining);
if (*iov_cnt != 0 &&
to_offset == iov_tail(*iov)) {
+ /*
+ * Will append to the current iovec, because
+ * the current block page is next to the
+ * previous one.
+ */
(*iov)->iov_len += copy_bytes;
} else {
- new_iov(iov, iov_cnt, udev);
+ /*
+			 * Will allocate a new iovec because this is the
+			 * first time here or the current block page is not
+			 * next to the previous one.
+ */
+ new_iov(iov, iov_cnt);
(*iov)->iov_base = (void __user *)to_offset;
(*iov)->iov_len = copy_bytes;
}
+
if (copy_data) {
offset = DATA_BLOCK_SIZE - block_remaining;
memcpy(to + offset,
@@ -564,15 +640,15 @@ static int scatter_data_area(struct tcmu_dev *udev,
copy_bytes);
tcmu_flush_dcache_range(to, copy_bytes);
}
+
sg_remaining -= copy_bytes;
block_remaining -= copy_bytes;
}
kunmap_atomic(from - sg->offset);
}
+
if (to)
kunmap_atomic(to);
-
- return 0;
}
static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
@@ -637,7 +713,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
- return DATA_BLOCK_SIZE * (thresh - bitmap_weight(bitmap, thresh));
+ return thresh - bitmap_weight(bitmap, thresh);
}
/*
@@ -677,9 +753,9 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
/* try to check and get the data blocks as needed */
space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
- if (space < data_needed) {
- unsigned long blocks_left = DATA_BLOCK_BITS - udev->dbi_thresh;
- unsigned long grow;
+ if ((space * DATA_BLOCK_SIZE) < data_needed) {
+ unsigned long blocks_left =
+ (udev->max_blocks - udev->dbi_thresh) + space;
if (blocks_left < blocks_needed) {
pr_debug("no data space: only %lu available, but ask for %zu\n",
@@ -688,23 +764,9 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
return false;
}
- /* Try to expand the thresh */
- if (!udev->dbi_thresh) {
- /* From idle state */
- uint32_t init_thresh = DATA_BLOCK_INIT_BITS;
-
- udev->dbi_thresh = max(blocks_needed, init_thresh);
- } else {
- /*
- * Grow the data area by max(blocks needed,
- * dbi_thresh / 2), but limited to the max
- * DATA_BLOCK_BITS size.
- */
- grow = max(blocks_needed, udev->dbi_thresh / 2);
- udev->dbi_thresh += grow;
- if (udev->dbi_thresh > DATA_BLOCK_BITS)
- udev->dbi_thresh = DATA_BLOCK_BITS;
- }
+ udev->dbi_thresh += blocks_needed;
+ if (udev->dbi_thresh > udev->max_blocks)
+ udev->dbi_thresh = udev->max_blocks;
}
return tcmu_get_empty_blocks(udev, cmd);
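After this change spc_bitmap_free() counts free blocks below dbi_thresh rather than bytes, so is_ring_space_avail() scales by DATA_BLOCK_SIZE itself and grows the threshold by exactly blocks_needed instead of the old doubling heuristic. A standalone walk-through of that arithmetic with assumed numbers:

#include <stdio.h>

#define DATA_BLOCK_SIZE 4096UL

int main(void)
{
	unsigned long max_blocks = 256 * 1024;	/* DATA_BLOCK_BITS_DEF */
	unsigned long thresh = 128;		/* dbi_thresh */
	unsigned long used = 100;		/* bitmap_weight(bitmap, thresh) */
	unsigned long space = thresh - used;	/* 28 free blocks below thresh */
	unsigned long blocks_needed = 40;
	unsigned long data_needed = blocks_needed * DATA_BLOCK_SIZE;

	if (space * DATA_BLOCK_SIZE < data_needed) {
		/* free blocks below thresh plus the untouched ones above */
		unsigned long blocks_left = (max_blocks - thresh) + space;

		if (blocks_left < blocks_needed)
			return 1;	/* truly out of data area */
		thresh += blocks_needed;
		if (thresh > max_blocks)
			thresh = max_blocks;
	}
	printf("new dbi_thresh = %lu\n", thresh);	/* prints 168 */
	return 0;
}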
@@ -731,14 +793,14 @@ static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
return command_size;
}
-static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
+static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
+ struct timer_list *timer)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
- unsigned long tmo = udev->cmd_time_out;
int cmd_id;
if (tcmu_cmd->cmd_id)
- return 0;
+ goto setup_timer;
cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
if (cmd_id < 0) {
@@ -747,16 +809,58 @@ static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
}
tcmu_cmd->cmd_id = cmd_id;
+ pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id,
+ udev->name, tmo / MSEC_PER_SEC);
+
+setup_timer:
if (!tmo)
return 0;
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
- mod_timer(&udev->timeout, tcmu_cmd->deadline);
+ mod_timer(timer, tcmu_cmd->deadline);
return 0;
}
-static sense_reason_t
-tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
+static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
+{
+ struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+ unsigned int tmo;
+ int ret;
+
+ /*
+	 * For backwards compat, if qfull_time_out is not set, use
+	 * cmd_time_out; if that is not set, use the default timeout.
+ */
+ if (!udev->qfull_time_out)
+ return -ETIMEDOUT;
+ else if (udev->qfull_time_out > 0)
+ tmo = udev->qfull_time_out;
+ else if (udev->cmd_time_out)
+ tmo = udev->cmd_time_out;
+ else
+ tmo = TCMU_TIME_OUT;
+
+ ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
+ if (ret)
+ return ret;
+
+ list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
+ pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
+ tcmu_cmd->cmd_id, udev->name);
+ return 0;
+}
+
+/**
+ * queue_cmd_ring - queue cmd to ring or internally
+ * @tcmu_cmd: cmd to queue
+ * @scsi_err: TCM error code, set when failure (-1) is returned.
+ *
+ * Returns:
+ * -1 if we cannot queue internally or to the ring.
+ * 0 success
+ * 1 internally queued to wait for ring memory to free.
+ */
+static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
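add_to_cmdr_queue() above resolves the wait timeout in a strict order: a qfull_time_out of zero fails the command immediately with -ETIMEDOUT, a positive value wins, otherwise cmd_time_out is used, with TCMU_TIME_OUT as the last resort. A small standalone sketch of just that selection; values are milliseconds and dflt stands in for the module default:

#include <stdio.h>

static int pick_qfull_tmo(int qfull_time_out, unsigned int cmd_time_out,
			  unsigned int dflt, unsigned int *tmo)
{
	if (!qfull_time_out)
		return -1;		/* fail the command immediately */
	if (qfull_time_out > 0)
		*tmo = qfull_time_out;	/* explicit qfull setting */
	else if (cmd_time_out)
		*tmo = cmd_time_out;	/* backwards-compat fallback */
	else
		*tmo = dflt;		/* module default */
	return 0;
}

int main(void)
{
	unsigned int tmo;

	/* qfull unset (-1), cmd timeout 30s: wait up to 30s */
	if (!pick_qfull_tmo(-1, 30000, 5000, &tmo))
		printf("tmo = %u ms\n", tmo);
	return 0;
}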
@@ -770,8 +874,17 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
bool copy_to_data_area;
size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
- if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ *scsi_err = TCM_NO_SENSE;
+
+ if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
+ *scsi_err = TCM_LUN_BUSY;
+ return -1;
+ }
+
+ if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
+ *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -1;
+ }
/*
* Must be a certain minimum size for response sense info, but
@@ -788,7 +901,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
- mutex_lock(&udev->cmdr_lock);
+ if (!list_empty(&udev->cmdr_queue))
+ goto queue;
mb = udev->mb_addr;
cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
@@ -797,33 +911,18 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
"cmd ring/data area\n", command_size, data_length,
udev->cmdr_size, udev->data_size);
- mutex_unlock(&udev->cmdr_lock);
- return TCM_INVALID_CDB_FIELD;
+ *scsi_err = TCM_INVALID_CDB_FIELD;
+ return -1;
}
- while (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
- int ret;
- DEFINE_WAIT(__wait);
-
- prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);
-
- pr_debug("sleeping for ring space\n");
- mutex_unlock(&udev->cmdr_lock);
- if (udev->cmd_time_out)
- ret = schedule_timeout(
- msecs_to_jiffies(udev->cmd_time_out));
- else
- ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
- finish_wait(&udev->wait_cmdr, &__wait);
- if (!ret) {
- pr_warn("tcmu: command timed out\n");
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
-
- mutex_lock(&udev->cmdr_lock);
-
- /* We dropped cmdr_lock, cmd_head is stale */
- cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
+ if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
+ /*
+ * Don't leave commands partially setup because the unmap
+ * thread might need the blocks to make forward progress.
+ */
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
+ tcmu_cmd_reset_dbi_cur(tcmu_cmd);
+ goto queue;
}
/* Insert a PAD if end-of-ring space is too small */
@@ -855,41 +954,29 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
iov_cnt = 0;
copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
|| se_cmd->se_cmd_flags & SCF_BIDI);
- ret = scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
- se_cmd->t_data_nents, &iov, &iov_cnt,
- copy_to_data_area);
- if (ret) {
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
- mutex_unlock(&udev->cmdr_lock);
-
- pr_err("tcmu: alloc and scatter data failed\n");
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
+ scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
+ se_cmd->t_data_nents, &iov, &iov_cnt,
+ copy_to_data_area);
entry->req.iov_cnt = iov_cnt;
/* Handle BIDI commands */
iov_cnt = 0;
if (se_cmd->se_cmd_flags & SCF_BIDI) {
iov++;
- ret = scatter_data_area(udev, tcmu_cmd,
- se_cmd->t_bidi_data_sg,
- se_cmd->t_bidi_data_nents,
- &iov, &iov_cnt, false);
- if (ret) {
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
- mutex_unlock(&udev->cmdr_lock);
-
- pr_err("tcmu: alloc and scatter bidi data failed\n");
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
+ scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
+ se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
+ false);
}
entry->req.iov_bidi_cnt = iov_cnt;
- ret = tcmu_setup_cmd_timer(tcmu_cmd);
+ ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out,
+ &udev->cmd_timer);
if (ret) {
tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
mutex_unlock(&udev->cmdr_lock);
- return TCM_OUT_OF_RESOURCES;
+
+ *scsi_err = TCM_OUT_OF_RESOURCES;
+ return -1;
}
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
@@ -911,36 +998,40 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
- mutex_unlock(&udev->cmdr_lock);
/* TODO: only if FLUSH and FUA? */
uio_event_notify(&udev->uio_info);
- if (udev->cmd_time_out)
- mod_timer(&udev->timeout, round_jiffies_up(jiffies +
- msecs_to_jiffies(udev->cmd_time_out)));
+ return 0;
+
+queue:
+ if (add_to_cmdr_queue(tcmu_cmd)) {
+ *scsi_err = TCM_OUT_OF_RESOURCES;
+ return -1;
+ }
- return TCM_NO_SENSE;
+ return 1;
}
static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
+ struct se_device *se_dev = se_cmd->se_dev;
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- sense_reason_t ret;
+ sense_reason_t scsi_ret;
+ int ret;
tcmu_cmd = tcmu_alloc_cmd(se_cmd);
if (!tcmu_cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- ret = tcmu_queue_cmd_ring(tcmu_cmd);
- if (ret != TCM_NO_SENSE) {
- pr_err("TCMU: Could not queue command\n");
-
+ mutex_lock(&udev->cmdr_lock);
+ ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
+ mutex_unlock(&udev->cmdr_lock);
+ if (ret < 0)
tcmu_free_cmd(tcmu_cmd);
- }
-
- return ret;
+ return scsi_ret;
}
static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
@@ -1011,12 +1102,10 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
}
WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
- spin_lock(&udev->commands_lock);
cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
- spin_unlock(&udev->commands_lock);
-
if (!cmd) {
- pr_err("cmd_id not found, ring is broken\n");
+ pr_err("cmd_id %u not found, ring is broken\n",
+ entry->hdr.cmd_id);
set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
break;
}
@@ -1030,10 +1119,20 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
handled++;
}
- if (mb->cmd_tail == mb->cmd_head)
- del_timer(&udev->timeout); /* no more pending cmds */
+ if (mb->cmd_tail == mb->cmd_head) {
+ /* no more pending commands */
+ del_timer(&udev->cmd_timer);
- wake_up(&udev->wait_cmdr);
+ if (list_empty(&udev->cmdr_queue)) {
+ /*
+ * no more pending or waiting commands so try to
+ * reclaim blocks if needed.
+ */
+ if (atomic_read(&global_db_count) >
+ tcmu_global_max_blocks)
+ schedule_delayed_work(&tcmu_unmap_work, 0);
+ }
+ }
return handled;
}
@@ -1041,6 +1140,10 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
struct tcmu_cmd *cmd = p;
+ struct tcmu_dev *udev = cmd->tcmu_dev;
+ u8 scsi_status;
+ struct se_cmd *se_cmd;
+ bool is_running;
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
return 0;
@@ -1048,29 +1151,61 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
if (!time_after(jiffies, cmd->deadline))
return 0;
- set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
- target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
- cmd->se_cmd = NULL;
+ is_running = list_empty(&cmd->cmdr_queue_entry);
+ se_cmd = cmd->se_cmd;
+
+ if (is_running) {
+ /*
+		 * If cmd_time_out is disabled but qfull is set, the deadline
+		 * will only reflect the qfull timeout. Ignore it.
+ */
+ if (!udev->cmd_time_out)
+ return 0;
+
+ set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
+ /*
+ * target_complete_cmd will translate this to LUN COMM FAILURE
+ */
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ } else {
+ list_del_init(&cmd->cmdr_queue_entry);
+
+ idr_remove(&udev->commands, id);
+ tcmu_free_cmd(cmd);
+ scsi_status = SAM_STAT_TASK_SET_FULL;
+ }
+
+ pr_debug("Timing out cmd %u on dev %s that is %s.\n",
+ id, udev->name, is_running ? "inflight" : "queued");
+ target_complete_cmd(se_cmd, scsi_status);
return 0;
}
-static void tcmu_device_timedout(struct timer_list *t)
+static void tcmu_device_timedout(struct tcmu_dev *udev)
{
- struct tcmu_dev *udev = from_timer(udev, t, timeout);
- unsigned long flags;
+ spin_lock(&timed_out_udevs_lock);
+ if (list_empty(&udev->timedout_entry))
+ list_add_tail(&udev->timedout_entry, &timed_out_udevs);
+ spin_unlock(&timed_out_udevs_lock);
- spin_lock_irqsave(&udev->commands_lock, flags);
- idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
- spin_unlock_irqrestore(&udev->commands_lock, flags);
+ schedule_delayed_work(&tcmu_unmap_work, 0);
+}
- /* Try to wake up the ummap thread */
- wake_up(&unmap_wait);
+static void tcmu_cmd_timedout(struct timer_list *t)
+{
+ struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
- /*
- * We don't need to wakeup threads on wait_cmdr since they have their
- * own timeout.
- */
+ pr_debug("%s cmd timeout has expired\n", udev->name);
+ tcmu_device_timedout(udev);
+}
+
+static void tcmu_qfull_timedout(struct timer_list *t)
+{
+ struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
+
+ pr_debug("%s qfull timeout has expired\n", udev->name);
+ tcmu_device_timedout(udev);
}
static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
@@ -1110,14 +1245,17 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
udev->hba = hba;
udev->cmd_time_out = TCMU_TIME_OUT;
+ udev->qfull_time_out = -1;
- init_waitqueue_head(&udev->wait_cmdr);
+ udev->max_blocks = DATA_BLOCK_BITS_DEF;
mutex_init(&udev->cmdr_lock);
+ INIT_LIST_HEAD(&udev->timedout_entry);
+ INIT_LIST_HEAD(&udev->cmdr_queue);
idr_init(&udev->commands);
- spin_lock_init(&udev->commands_lock);
- timer_setup(&udev->timeout, tcmu_device_timedout, 0);
+ timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
+ timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
init_waitqueue_head(&udev->nl_cmd_wq);
spin_lock_init(&udev->nl_cmd_lock);
@@ -1127,13 +1265,79 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
return &udev->se_dev;
}
+static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
+{
+ struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
+ LIST_HEAD(cmds);
+ bool drained = true;
+ sense_reason_t scsi_ret;
+ int ret;
+
+ if (list_empty(&udev->cmdr_queue))
+ return true;
+
+ pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
+
+ list_splice_init(&udev->cmdr_queue, &cmds);
+
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
+ list_del_init(&tcmu_cmd->cmdr_queue_entry);
+
+ pr_debug("removing cmd %u on dev %s from queue\n",
+ tcmu_cmd->cmd_id, udev->name);
+
+ if (fail) {
+ idr_remove(&udev->commands, tcmu_cmd->cmd_id);
+ /*
+ * We were not able to even start the command, so
+			 * fail with busy to allow a retry in case the runner
+ * was only temporarily down. If the device is being
+ * removed then LIO core will do the right thing and
+ * fail the retry.
+ */
+ target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
+ tcmu_free_cmd(tcmu_cmd);
+ continue;
+ }
+
+ ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
+ if (ret < 0) {
+ pr_debug("cmd %u on dev %s failed with %u\n",
+ tcmu_cmd->cmd_id, udev->name, scsi_ret);
+
+ idr_remove(&udev->commands, tcmu_cmd->cmd_id);
+ /*
+ * Ignore scsi_ret for now. target_complete_cmd
+ * drops it.
+ */
+ target_complete_cmd(tcmu_cmd->se_cmd,
+ SAM_STAT_CHECK_CONDITION);
+ tcmu_free_cmd(tcmu_cmd);
+ } else if (ret > 0) {
+ pr_debug("ran out of space during cmdr queue run\n");
+ /*
+ * cmd was requeued, so just put all cmds back in
+ * the queue
+ */
+ list_splice_tail(&cmds, &udev->cmdr_queue);
+ drained = false;
+ goto done;
+ }
+ }
+ if (list_empty(&udev->cmdr_queue))
+ del_timer(&udev->qfull_timer);
+done:
+ return drained;
+}
+
static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
- struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);
+ struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
- mutex_lock(&tcmu_dev->cmdr_lock);
- tcmu_handle_completions(tcmu_dev);
- mutex_unlock(&tcmu_dev->cmdr_lock);
+ mutex_lock(&udev->cmdr_lock);
+ tcmu_handle_completions(udev);
+ run_cmdr_queue(udev, false);
+ mutex_unlock(&udev->cmdr_lock);
return 0;
}
@@ -1158,7 +1362,6 @@ static int tcmu_find_mem_index(struct vm_area_struct *vma)
static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
struct page *page;
- int ret;
mutex_lock(&udev->cmdr_lock);
page = tcmu_get_block_page(udev, dbi);
@@ -1168,42 +1371,12 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
}
/*
- * Normally it shouldn't be here:
- * Only when the userspace has touched the blocks which
- * are out of the tcmu_cmd's data iov[], and will return
- * one zeroed page.
+	 * Userspace messed up and passed in an address not in the
+ * data iov passed to it.
*/
- pr_warn("Block(%u) out of cmd's iov[] has been touched!\n", dbi);
- pr_warn("Mostly it will be a bug of userspace, please have a check!\n");
-
- if (dbi >= udev->dbi_thresh) {
- /* Extern the udev->dbi_thresh to dbi + 1 */
- udev->dbi_thresh = dbi + 1;
- udev->dbi_max = dbi;
- }
-
- page = radix_tree_lookup(&udev->data_blocks, dbi);
- if (!page) {
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page) {
- mutex_unlock(&udev->cmdr_lock);
- return NULL;
- }
-
- ret = radix_tree_insert(&udev->data_blocks, dbi, page);
- if (ret) {
- mutex_unlock(&udev->cmdr_lock);
- __free_page(page);
- return NULL;
- }
-
- /*
- * Since this case is rare in page fault routine, here we
- * will allow the global_db_count >= TCMU_GLOBAL_MAX_BLOCKS
- * to reduce possible page fault call trace.
- */
- atomic_inc(&global_db_count);
- }
+ pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
+ dbi, udev->name);
+ page = NULL;
mutex_unlock(&udev->cmdr_lock);
return page;
@@ -1238,7 +1411,7 @@ static int tcmu_vma_fault(struct vm_fault *vmf)
dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
page = tcmu_try_get_block_page(udev, dbi);
if (!page)
- return VM_FAULT_NOPAGE;
+ return VM_FAULT_SIGBUS;
}
get_page(page);
@@ -1260,7 +1433,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
vma->vm_private_data = udev;
/* Ensure the mmap is exactly the right size */
- if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
+ if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
return -EINVAL;
return 0;
@@ -1301,21 +1474,19 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
return -EINVAL;
}
-static void tcmu_blocks_release(struct tcmu_dev *udev)
+static void tcmu_blocks_release(struct radix_tree_root *blocks,
+ int start, int end)
{
int i;
struct page *page;
- /* Try to release all block pages */
- mutex_lock(&udev->cmdr_lock);
- for (i = 0; i <= udev->dbi_max; i++) {
- page = radix_tree_delete(&udev->data_blocks, i);
+ for (i = start; i < end; i++) {
+ page = radix_tree_delete(blocks, i);
if (page) {
__free_page(page);
atomic_dec(&global_db_count);
}
}
- mutex_unlock(&udev->cmdr_lock);
}
static void tcmu_dev_kref_release(struct kref *kref)
@@ -1329,17 +1500,23 @@ static void tcmu_dev_kref_release(struct kref *kref)
vfree(udev->mb_addr);
udev->mb_addr = NULL;
+ spin_lock_bh(&timed_out_udevs_lock);
+ if (!list_empty(&udev->timedout_entry))
+ list_del(&udev->timedout_entry);
+ spin_unlock_bh(&timed_out_udevs_lock);
+
/* Upper layer should drain all requests before calling this */
- spin_lock_irq(&udev->commands_lock);
+ mutex_lock(&udev->cmdr_lock);
idr_for_each_entry(&udev->commands, cmd, i) {
if (tcmu_check_and_free_pending_cmd(cmd) != 0)
all_expired = false;
}
idr_destroy(&udev->commands);
- spin_unlock_irq(&udev->commands_lock);
WARN_ON(!all_expired);
- tcmu_blocks_release(udev);
+ tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
+ kfree(udev->data_bitmap);
+ mutex_unlock(&udev->cmdr_lock);
call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
@@ -1406,7 +1583,7 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
wake_up_all(&udev->nl_cmd_wq);
- return ret;;
+ return ret;
}
static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
@@ -1515,6 +1692,13 @@ static int tcmu_configure_device(struct se_device *dev)
info = &udev->uio_info;
+ udev->data_bitmap = kzalloc(BITS_TO_LONGS(udev->max_blocks) *
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!udev->data_bitmap) {
+ ret = -ENOMEM;
+ goto err_bitmap_alloc;
+ }
+
udev->mb_addr = vzalloc(CMDR_SIZE);
if (!udev->mb_addr) {
ret = -ENOMEM;
@@ -1524,9 +1708,8 @@ static int tcmu_configure_device(struct se_device *dev)
/* mailbox fits in first part of CMDR space */
udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
udev->data_off = CMDR_SIZE;
- udev->data_size = DATA_SIZE;
+ udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
udev->dbi_thresh = 0; /* Default in Idle state */
- udev->waiting_global = false;
/* Initialise the mailbox of the ring buffer */
mb = udev->mb_addr;
@@ -1543,7 +1726,7 @@ static int tcmu_configure_device(struct se_device *dev)
info->mem[0].name = "tcm-user command & data buffer";
info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
- info->mem[0].size = TCMU_RING_SIZE;
+ info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
info->mem[0].memtype = UIO_MEM_NONE;
info->irqcontrol = tcmu_irqcontrol;
@@ -1596,6 +1779,9 @@ err_register:
vfree(udev->mb_addr);
udev->mb_addr = NULL;
err_vzalloc:
+ kfree(udev->data_bitmap);
+ udev->data_bitmap = NULL;
+err_bitmap_alloc:
kfree(info->name);
info->name = NULL;
@@ -1619,7 +1805,8 @@ static void tcmu_destroy_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- del_timer_sync(&udev->timeout);
+ del_timer_sync(&udev->cmd_timer);
+ del_timer_sync(&udev->qfull_timer);
mutex_lock(&root_udev_mutex);
list_del(&udev->node);
@@ -1633,9 +1820,81 @@ static void tcmu_destroy_device(struct se_device *dev)
kref_put(&udev->kref, tcmu_dev_kref_release);
}
+static void tcmu_unblock_dev(struct tcmu_dev *udev)
+{
+ mutex_lock(&udev->cmdr_lock);
+ clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
+ mutex_unlock(&udev->cmdr_lock);
+}
+
+static void tcmu_block_dev(struct tcmu_dev *udev)
+{
+ mutex_lock(&udev->cmdr_lock);
+
+ if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
+ goto unlock;
+
+ /* complete IO that has executed successfully */
+ tcmu_handle_completions(udev);
+ /* fail IO waiting to be queued */
+ run_cmdr_queue(udev, true);
+
+unlock:
+ mutex_unlock(&udev->cmdr_lock);
+}
+
+static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
+{
+ struct tcmu_mailbox *mb;
+ struct tcmu_cmd *cmd;
+ int i;
+
+ mutex_lock(&udev->cmdr_lock);
+
+ idr_for_each_entry(&udev->commands, cmd, i) {
+ if (!list_empty(&cmd->cmdr_queue_entry))
+ continue;
+
+ pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
+ cmd->cmd_id, udev->name,
+ test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));
+
+ idr_remove(&udev->commands, i);
+ if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ if (err_level == 1) {
+ /*
+ * Userspace was not able to start the
+ * command or it is retryable.
+ */
+ target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
+ } else {
+ /* hard failure */
+ target_complete_cmd(cmd->se_cmd,
+ SAM_STAT_CHECK_CONDITION);
+ }
+ }
+ tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
+ tcmu_free_cmd(cmd);
+ }
+
+ mb = udev->mb_addr;
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
+	pr_debug("mb last %u tail %u head %u\n", udev->cmdr_last_cleaned,
+		 mb->cmd_tail, mb->cmd_head);
+
+ udev->cmdr_last_cleaned = 0;
+ mb->cmd_tail = 0;
+ mb->cmd_head = 0;
+ tcmu_flush_dcache_range(mb, sizeof(*mb));
+
+ del_timer(&udev->cmd_timer);
+
+ mutex_unlock(&udev->cmdr_lock);
+}
+
enum {
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
- Opt_nl_reply_supported, Opt_err,
+ Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
};
static match_table_t tokens = {
@@ -1644,6 +1903,7 @@ static match_table_t tokens = {
{Opt_hw_block_size, "hw_block_size=%u"},
{Opt_hw_max_sectors, "hw_max_sectors=%u"},
{Opt_nl_reply_supported, "nl_reply_supported=%d"},
+ {Opt_max_data_area_mb, "max_data_area_mb=%u"},
{Opt_err, NULL}
};
@@ -1677,7 +1937,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
struct tcmu_dev *udev = TCMU_DEV(dev);
char *orig, *ptr, *opts, *arg_p;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, token;
+ int ret = 0, token, tmpval;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -1729,6 +1989,39 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
if (ret < 0)
pr_err("kstrtoint() failed for nl_reply_supported=\n");
break;
+ case Opt_max_data_area_mb:
+ if (dev->export_count) {
+ pr_err("Unable to set max_data_area_mb while exports exist\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = kstrtoint(arg_p, 0, &tmpval);
+ kfree(arg_p);
+ if (ret < 0) {
+ pr_err("kstrtoint() failed for max_data_area_mb=\n");
+ break;
+ }
+
+ if (tmpval <= 0) {
+ pr_err("Invalid max_data_area %d\n", tmpval);
+ ret = -EINVAL;
+ break;
+ }
+
+ udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval);
+ if (udev->max_blocks > tcmu_global_max_blocks) {
+ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+ tmpval,
+ TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ udev->max_blocks = tcmu_global_max_blocks;
+ }
+ break;
default:
break;
}
@@ -1748,7 +2041,9 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
bl = sprintf(b + bl, "Config: %s ",
udev->dev_config[0] ? udev->dev_config : "NULL");
- bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);
+ bl += sprintf(b + bl, "Size: %zu ", udev->dev_size);
+ bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
+ TCMU_BLOCKS_TO_MBS(udev->max_blocks));
return bl;
}
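With the extra field, the dev_params output now reads, for example, "Config: NULL Size: 0 MaxDataAreaMB: 1024" for an unconfigured device left at the default data-area size (values assumed for illustration).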
@@ -1800,6 +2095,51 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);
+static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
+ udev->qfull_time_out :
+ udev->qfull_time_out / MSEC_PER_SEC);
+}
+
+static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ s32 val;
+ int ret;
+
+ ret = kstrtos32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val >= 0) {
+ udev->qfull_time_out = val * MSEC_PER_SEC;
+ } else {
+ printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
+ return -EINVAL;
+ }
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, qfull_time_out);
+
+static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%u\n",
+ TCMU_BLOCKS_TO_MBS(udev->max_blocks));
+}
+CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
+
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
struct se_dev_attrib *da = container_of(to_config_group(item),
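A hypothetical userspace sketch exercising the two new attrib files; the backstore path is an example and depends on the local configfs layout. Note the seconds/milliseconds split: the store handler multiplies by MSEC_PER_SEC and the show handler divides positive values back down.

#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/kernel/config/target/core/user_0/mydev/attrib";
	char path[256];
	unsigned int mb;
	FILE *f;

	/* let queued commands wait up to 30 seconds for ring space */
	snprintf(path, sizeof(path), "%s/qfull_time_out", dir);
	f = fopen(path, "w");
	if (f) {
		fputs("30", f);
		fclose(f);
	}

	/* the per-device data area limit is read-only once created */
	snprintf(path, sizeof(path), "%s/max_data_area_mb", dir);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%u", &mb) == 1)
			printf("max_data_area_mb = %u\n", mb);
		fclose(f);
	}
	return 0;
}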
@@ -1943,8 +2283,74 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
}
CONFIGFS_ATTR(tcmu_, emulate_write_cache);
+static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
+{
+ struct se_device *se_dev = container_of(to_config_group(item),
+ struct se_device,
+ dev_action_group);
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+ if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
+ return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
+ else
+ return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
+}
+
+static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_device *se_dev = container_of(to_config_group(item),
+ struct se_device,
+ dev_action_group);
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+ u8 val;
+ int ret;
+
+ ret = kstrtou8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val > 1) {
+ pr_err("Invalid block value %d\n", val);
+ return -EINVAL;
+ }
+
+ if (!val)
+ tcmu_unblock_dev(udev);
+ else
+ tcmu_block_dev(udev);
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, block_dev);
+
+static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_device *se_dev = container_of(to_config_group(item),
+ struct se_device,
+ dev_action_group);
+ struct tcmu_dev *udev = TCMU_DEV(se_dev);
+ u8 val;
+ int ret;
+
+ ret = kstrtou8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != 1 && val != 2) {
+ pr_err("Invalid reset ring value %d\n", val);
+ return -EINVAL;
+ }
+
+ tcmu_reset_ring(udev, val);
+ return count;
+}
+CONFIGFS_ATTR_WO(tcmu_, reset_ring);
+
static struct configfs_attribute *tcmu_attrib_attrs[] = {
&tcmu_attr_cmd_time_out,
+ &tcmu_attr_qfull_time_out,
+ &tcmu_attr_max_data_area_mb,
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
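The action group gives userspace a recovery path when the daemon servicing the ring dies: block_dev fences the device (new commands see SAM_STAT_BUSY via TCM_LUN_BUSY), reset_ring clears in-flight commands with 1 for a retryable BUSY completion or 2 for a hard CHECK CONDITION, and writing 0 to block_dev resumes. A hypothetical sketch, with the configfs path assumed:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void poke(const char *file, const char *val)
{
	char path[256];
	int fd;

	snprintf(path, sizeof(path),
		 "/sys/kernel/config/target/core/user_0/mydev/action/%s",
		 file);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return;
	if (write(fd, val, 1) != 1)
		perror(path);
	close(fd);
}

int main(void)
{
	poke("block_dev", "1");		/* fence: new I/O sees BUSY */
	poke("reset_ring", "1");	/* fail ring cmds as retryable */
	/* ... restart the userspace daemon here ... */
	poke("block_dev", "0");		/* resume */
	return 0;
}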
@@ -1954,6 +2360,12 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
static struct configfs_attribute **tcmu_attrs;
+static struct configfs_attribute *tcmu_action_attrs[] = {
+ &tcmu_attr_block_dev,
+ &tcmu_attr_reset_ring,
+ NULL,
+};
+
static struct target_backend_ops tcmu_ops = {
.name = "user",
.owner = THIS_MODULE,
@@ -1969,85 +2381,93 @@ static struct target_backend_ops tcmu_ops = {
.show_configfs_dev_params = tcmu_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = tcmu_get_blocks,
- .tb_dev_attrib_attrs = NULL,
+ .tb_dev_action_attrs = tcmu_action_attrs,
};
-static int unmap_thread_fn(void *data)
+static void find_free_blocks(void)
{
struct tcmu_dev *udev;
loff_t off;
- uint32_t start, end, block;
- struct page *page;
- int i;
+ u32 start, end, block, total_freed = 0;
- while (!kthread_should_stop()) {
- DEFINE_WAIT(__wait);
+ if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
+ return;
- prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
- schedule();
- finish_wait(&unmap_wait, &__wait);
+ mutex_lock(&root_udev_mutex);
+ list_for_each_entry(udev, &root_udev, node) {
+ mutex_lock(&udev->cmdr_lock);
- if (kthread_should_stop())
- break;
+ /* Try to complete the finished commands first */
+ tcmu_handle_completions(udev);
- mutex_lock(&root_udev_mutex);
- list_for_each_entry(udev, &root_udev, node) {
- mutex_lock(&udev->cmdr_lock);
+ /* Skip the udevs in idle */
+ if (!udev->dbi_thresh) {
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ }
- /* Try to complete the finished commands first */
- tcmu_handle_completions(udev);
+ end = udev->dbi_max + 1;
+ block = find_last_bit(udev->data_bitmap, end);
+ if (block == udev->dbi_max) {
+ /*
+			 * The last bit is dbi_max, so it is not possible
+			 * to reclaim any blocks.
+ */
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ } else if (block == end) {
+			/* The current udev will go to the idle state */
+ udev->dbi_thresh = start = 0;
+ udev->dbi_max = 0;
+ } else {
+ udev->dbi_thresh = start = block + 1;
+ udev->dbi_max = block;
+ }
- /* Skip the udevs waiting the global pool or in idle */
- if (udev->waiting_global || !udev->dbi_thresh) {
- mutex_unlock(&udev->cmdr_lock);
- continue;
- }
+		/* Truncate the data area from off onward */
+ off = udev->data_off + start * DATA_BLOCK_SIZE;
+ unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
- end = udev->dbi_max + 1;
- block = find_last_bit(udev->data_bitmap, end);
- if (block == udev->dbi_max) {
- /*
- * The last bit is dbi_max, so there is
- * no need to shrink any blocks.
- */
- mutex_unlock(&udev->cmdr_lock);
- continue;
- } else if (block == end) {
- /* The current udev will goto idle state */
- udev->dbi_thresh = start = 0;
- udev->dbi_max = 0;
- } else {
- udev->dbi_thresh = start = block + 1;
- udev->dbi_max = block;
- }
+ /* Release the block pages */
+ tcmu_blocks_release(&udev->data_blocks, start, end);
+ mutex_unlock(&udev->cmdr_lock);
- /* Here will truncate the data area from off */
- off = udev->data_off + start * DATA_BLOCK_SIZE;
- unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
-
- /* Release the block pages */
- for (i = start; i < end; i++) {
- page = radix_tree_delete(&udev->data_blocks, i);
- if (page) {
- __free_page(page);
- atomic_dec(&global_db_count);
- }
- }
- mutex_unlock(&udev->cmdr_lock);
- }
+ total_freed += end - start;
+ pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
+ total_freed, udev->name);
+ }
+ mutex_unlock(&root_udev_mutex);
- /*
- * Try to wake up the udevs who are waiting
- * for the global data pool.
- */
- list_for_each_entry(udev, &root_udev, node) {
- if (udev->waiting_global)
- wake_up(&udev->wait_cmdr);
- }
- mutex_unlock(&root_udev_mutex);
+ if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
+ schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
+}
+
+static void check_timedout_devices(void)
+{
+ struct tcmu_dev *udev, *tmp_dev;
+ LIST_HEAD(devs);
+
+ spin_lock_bh(&timed_out_udevs_lock);
+ list_splice_init(&timed_out_udevs, &devs);
+
+ list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
+ list_del_init(&udev->timedout_entry);
+ spin_unlock_bh(&timed_out_udevs_lock);
+
+ mutex_lock(&udev->cmdr_lock);
+ idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
+ mutex_unlock(&udev->cmdr_lock);
+
+ spin_lock_bh(&timed_out_udevs_lock);
}
- return 0;
+ spin_unlock_bh(&timed_out_udevs_lock);
+}
+
+static void tcmu_unmap_work_fn(struct work_struct *work)
+{
+ check_timedout_devices();
+ find_free_blocks();
}
static int __init tcmu_module_init(void)
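find_free_blocks() only runs once global_db_count exceeds the limit, and it reclaims the tail of each device's data area: everything above the highest set bit in data_bitmap. A standalone sketch of that decision with assumed numbers; find_last_bit() returns the bitmap size when no bit is set, which is the block == end idle case:

#include <stdio.h>

int main(void)
{
	unsigned int dbi_max = 511;	/* highest block ever allocated */
	unsigned int end = dbi_max + 1;
	unsigned int block = 300;	/* find_last_bit(data_bitmap, end) */
	unsigned int start, thresh;

	if (block == dbi_max) {
		puts("top block still in use: nothing to reclaim");
		return 0;
	} else if (block == end) {
		thresh = start = 0;	/* bitmap empty: device goes idle */
		dbi_max = 0;
	} else {
		thresh = start = block + 1;
		dbi_max = block;
	}
	/* blocks [start, end) are unmapped and freed */
	printf("freed %u blocks, new dbi_thresh=%u dbi_max=%u\n",
	       end - start, thresh, dbi_max);	/* 211, 301, 300 */
	return 0;
}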
@@ -2056,6 +2476,8 @@ static int __init tcmu_module_init(void)
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
+ INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
+
tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
sizeof(struct tcmu_cmd),
__alignof__(struct tcmu_cmd),
@@ -2101,17 +2523,8 @@ static int __init tcmu_module_init(void)
if (ret)
goto out_attrs;
- init_waitqueue_head(&unmap_wait);
- unmap_thread = kthread_run(unmap_thread_fn, NULL, "tcmu_unmap");
- if (IS_ERR(unmap_thread)) {
- ret = PTR_ERR(unmap_thread);
- goto out_unreg_transport;
- }
-
return 0;
-out_unreg_transport:
- target_backend_unregister(&tcmu_ops);
out_attrs:
kfree(tcmu_attrs);
out_unreg_genl:
@@ -2126,7 +2539,7 @@ out_free_cache:
static void __exit tcmu_module_exit(void)
{
- kthread_stop(unmap_thread);
+ cancel_delayed_work_sync(&tcmu_unmap_work);
target_backend_unregister(&tcmu_ops);
kfree(tcmu_attrs);
genl_unregister_family(&tcmu_genl_family);
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 4c8b80f1c688..870e84fb6e39 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -197,25 +197,20 @@ int __init setup_earlycon(char *buf)
}
/*
- * When CONFIG_ACPI_SPCR_TABLE is defined, "earlycon" without parameters in
- * command line does not start DT earlycon immediately, instead it defers
- * starting it until DT/ACPI decision is made. At that time if ACPI is enabled
- * call parse_spcr(), else call early_init_dt_scan_chosen_stdout()
+ * This defers the initialization of the early console until after ACPI has
+ * been initialized.
*/
-bool earlycon_init_is_deferred __initdata;
+bool earlycon_acpi_spcr_enable __initdata;
/* early_param wrapper for setup_earlycon() */
static int __init param_setup_earlycon(char *buf)
{
int err;
- /*
- * Just 'earlycon' is a valid param for devicetree earlycons;
- * don't generate a warning from parse_early_params() in that case
- */
+ /* Just 'earlycon' is a valid param for devicetree and ACPI SPCR. */
if (!buf || !buf[0]) {
if (IS_ENABLED(CONFIG_ACPI_SPCR_TABLE)) {
- earlycon_init_is_deferred = true;
+ earlycon_acpi_spcr_enable = true;
return 0;
} else if (!buf) {
return early_init_dt_scan_chosen_stdout();
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9c3f8160ef24..c613d2e3d371 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1015,7 +1015,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
vhost_dev_stop(&n->dev);
- vhost_dev_cleanup(&n->dev, false);
+ vhost_dev_cleanup(&n->dev);
vhost_net_vq_reset(n);
if (tx_sock)
sockfd_put(tx_sock);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 71517b3c5558..7ad57094d736 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -586,8 +586,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
sg = cmd->tvc_sgl;
prot_sg = cmd->tvc_prot_sgl;
pages = cmd->tvc_upages;
- memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
-
+ memset(cmd, 0, sizeof(*cmd));
cmd->tvc_sgl = sg;
cmd->tvc_prot_sgl = prot_sg;
cmd->tvc_upages = pages;
@@ -1420,7 +1419,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
mutex_unlock(&vs->dev.mutex);
vhost_scsi_clear_endpoint(vs, &t);
vhost_dev_stop(&vs->dev);
- vhost_dev_cleanup(&vs->dev, false);
+ vhost_dev_cleanup(&vs->dev);
/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
vhost_scsi_flush(vs);
kfree(vs->dev.vqs);
@@ -1725,7 +1724,7 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
return -EEXIST;
}
- tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
+ tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
if (!tv_nexus) {
mutex_unlock(&tpg->tv_tpg_mutex);
pr_err("Unable to allocate struct vhost_scsi_nexus\n");
@@ -1926,7 +1925,7 @@ vhost_scsi_make_tpg(struct se_wwn *wwn,
if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
return ERR_PTR(-EINVAL);
- tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
+ tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
if (!tpg) {
pr_err("Unable to allocate struct vhost_scsi_tpg");
return ERR_PTR(-ENOMEM);
@@ -1980,7 +1979,7 @@ vhost_scsi_make_tport(struct target_fabric_configfs *tf,
/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
return ERR_PTR(-EINVAL); */
- tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
+ tport = kzalloc(sizeof(*tport), GFP_KERNEL);
if (!tport) {
pr_err("Unable to allocate struct vhost_scsi_tport");
return ERR_PTR(-ENOMEM);
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 3cc98c07dcd3..906b8f0f19f7 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -157,7 +157,7 @@ static int vhost_test_release(struct inode *inode, struct file *f)
vhost_test_stop(n, &private);
vhost_test_flush(n);
- vhost_dev_cleanup(&n->dev, false);
+ vhost_dev_cleanup(&n->dev);
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_test_flush(n);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 8d4374606756..2db5af8e8652 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -181,7 +181,6 @@ void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
clear_bit(VHOST_WORK_QUEUED, &work->flags);
work->fn = fn;
- init_waitqueue_head(&work->done);
}
EXPORT_SYMBOL_GPL(vhost_work_init);
@@ -319,10 +318,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
vq->acked_features = 0;
vq->log_base = NULL;
vq->error_ctx = NULL;
- vq->error = NULL;
vq->kick = NULL;
vq->call_ctx = NULL;
- vq->call = NULL;
vq->log_ctx = NULL;
vhost_reset_is_le(vq);
vhost_disable_cross_endian(vq);
@@ -422,7 +419,6 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->nvqs = nvqs;
mutex_init(&dev->mutex);
dev->log_ctx = NULL;
- dev->log_file = NULL;
dev->umem = NULL;
dev->iotlb = NULL;
dev->mm = NULL;
@@ -544,7 +540,7 @@ void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
{
int i;
- vhost_dev_cleanup(dev, true);
+ vhost_dev_cleanup(dev);
/* Restore memory to default empty mapping. */
INIT_LIST_HEAD(&umem->umem_list);
@@ -611,31 +607,23 @@ static void vhost_clear_msg(struct vhost_dev *dev)
spin_unlock(&dev->iotlb_lock);
}
-/* Caller should have device mutex if and only if locked is set */
-void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
+void vhost_dev_cleanup(struct vhost_dev *dev)
{
int i;
for (i = 0; i < dev->nvqs; ++i) {
if (dev->vqs[i]->error_ctx)
eventfd_ctx_put(dev->vqs[i]->error_ctx);
- if (dev->vqs[i]->error)
- fput(dev->vqs[i]->error);
if (dev->vqs[i]->kick)
fput(dev->vqs[i]->kick);
if (dev->vqs[i]->call_ctx)
eventfd_ctx_put(dev->vqs[i]->call_ctx);
- if (dev->vqs[i]->call)
- fput(dev->vqs[i]->call);
vhost_vq_reset(dev, dev->vqs[i]);
}
vhost_dev_free_iovecs(dev);
if (dev->log_ctx)
eventfd_ctx_put(dev->log_ctx);
dev->log_ctx = NULL;
- if (dev->log_file)
- fput(dev->log_file);
- dev->log_file = NULL;
/* No one will access memory at this point */
vhost_umem_clean(dev->umem);
dev->umem = NULL;
@@ -1492,38 +1480,24 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EFAULT;
break;
}
- eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
- if (IS_ERR(eventfp)) {
- r = PTR_ERR(eventfp);
+ ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
+ if (IS_ERR(ctx)) {
+ r = PTR_ERR(ctx);
break;
}
- if (eventfp != vq->call) {
- filep = vq->call;
- ctx = vq->call_ctx;
- vq->call = eventfp;
- vq->call_ctx = eventfp ?
- eventfd_ctx_fileget(eventfp) : NULL;
- } else
- filep = eventfp;
+ swap(ctx, vq->call_ctx);
break;
case VHOST_SET_VRING_ERR:
if (copy_from_user(&f, argp, sizeof f)) {
r = -EFAULT;
break;
}
- eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
- if (IS_ERR(eventfp)) {
- r = PTR_ERR(eventfp);
+ ctx = f.fd == -1 ? NULL : eventfd_ctx_fdget(f.fd);
+ if (IS_ERR(ctx)) {
+ r = PTR_ERR(ctx);
break;
}
- if (eventfp != vq->error) {
- filep = vq->error;
- vq->error = eventfp;
- ctx = vq->error_ctx;
- vq->error_ctx = eventfp ?
- eventfd_ctx_fileget(eventfp) : NULL;
- } else
- filep = eventfp;
+ swap(ctx, vq->error_ctx);
break;
case VHOST_SET_VRING_ENDIAN:
r = vhost_set_vring_endian(vq, argp);
@@ -1551,7 +1525,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
if (pollstop && vq->handle_kick)
vhost_poll_stop(&vq->poll);
- if (ctx)
+ if (!IS_ERR_OR_NULL(ctx))
eventfd_ctx_put(ctx);
if (filep)
fput(filep);
@@ -1594,8 +1568,7 @@ EXPORT_SYMBOL_GPL(vhost_init_device_iotlb);
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
- struct file *eventfp, *filep = NULL;
- struct eventfd_ctx *ctx = NULL;
+ struct eventfd_ctx *ctx;
u64 p;
long r;
int i, fd;
@@ -1641,19 +1614,12 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
r = get_user(fd, (int __user *)argp);
if (r < 0)
break;
- eventfp = fd == -1 ? NULL : eventfd_fget(fd);
- if (IS_ERR(eventfp)) {
- r = PTR_ERR(eventfp);
+ ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
+ if (IS_ERR(ctx)) {
+ r = PTR_ERR(ctx);
break;
}
- if (eventfp != d->log_file) {
- filep = d->log_file;
- d->log_file = eventfp;
- ctx = d->log_ctx;
- d->log_ctx = eventfp ?
- eventfd_ctx_fileget(eventfp) : NULL;
- } else
- filep = eventfp;
+ swap(ctx, d->log_ctx);
for (i = 0; i < d->nvqs; ++i) {
mutex_lock(&d->vqs[i]->mutex);
d->vqs[i]->log_ctx = d->log_ctx;
@@ -1661,8 +1627,6 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
}
if (ctx)
eventfd_ctx_put(ctx);
- if (filep)
- fput(filep);
break;
default:
r = -ENOIOCTLCMD;
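
All three ioctl paths above now deal purely in struct eventfd_ctx: eventfd_ctx_fdget() takes a context reference straight from the file descriptor, swap() exchanges it with the stored pointer, and the displaced value is put afterwards, which removes the struct file bookkeeping entirely. A reduced sketch of the pattern, assuming a hypothetical helper name and omitting the locking:

#include <linux/eventfd.h>
#include <linux/err.h>
#include <linux/kernel.h>

static long example_set_call(struct vhost_virtqueue *vq, int fd)
{
	struct eventfd_ctx *ctx;

	/* fd == -1 clears the eventfd; otherwise take a ctx reference. */
	ctx = fd == -1 ? NULL : eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	swap(ctx, vq->call_ctx);	/* install the new ctx, recover the old */

	/* ctx now holds the previous value, which may be NULL; the shared
	 * cleanup in vhost_vring_ioctl() guards with IS_ERR_OR_NULL()
	 * because the error cases above fall through to it too. */
	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);
	return 0;
}
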
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 7876a3d7d1b3..ac4b6056f19a 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -20,10 +20,6 @@ typedef void (*vhost_work_fn_t)(struct vhost_work *work);
struct vhost_work {
struct llist_node node;
vhost_work_fn_t fn;
- wait_queue_head_t done;
- int flushing;
- unsigned queue_seq;
- unsigned done_seq;
unsigned long flags;
};
@@ -96,8 +92,6 @@ struct vhost_virtqueue {
struct vring_used __user *used;
const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
struct file *kick;
- struct file *call;
- struct file *error;
struct eventfd_ctx *call_ctx;
struct eventfd_ctx *error_ctx;
struct eventfd_ctx *log_ctx;
@@ -163,7 +157,6 @@ struct vhost_dev {
struct mutex mutex;
struct vhost_virtqueue **vqs;
int nvqs;
- struct file *log_file;
struct eventfd_ctx *log_ctx;
struct llist_head work_list;
struct task_struct *worker;
@@ -181,7 +174,7 @@ bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_umem *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
-void vhost_dev_cleanup(struct vhost_dev *, bool locked);
+void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 5a5e981bd8e4..0d14e2ff19f1 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -599,7 +599,7 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
}
spin_unlock_bh(&vsock->send_pkt_list_lock);
- vhost_dev_cleanup(&vsock->dev, false);
+ vhost_dev_cleanup(&vsock->dev);
kfree(vsock->dev.vqs);
vhost_vsock_free(vsock);
return 0;
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 6962b4583fd7..11e699f1062b 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1156,7 +1156,6 @@ config FB_I810_I2C
bool "Enable DDC Support"
depends on FB_I810 && FB_I810_GTF
select FB_DDC
- help
config FB_LE80578
tristate "Intel LE80578 (Vermilion) support"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
index 39fe7247ff98..f0cac9e0eb94 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
@@ -40,6 +40,7 @@
#include <linux/regulator/consumer.h>
#include <linux/suspend.h>
#include <linux/component.h>
+#include <linux/pinctrl/consumer.h>
#include <video/omapfb_dss.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
index eee09b4bdcf6..11dbc05d5720 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
@@ -13,6 +13,8 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/seq_file.h>
+
#include <video/omapfb_dss.h>
#include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
index eac3665aba6c..bc591fc12aef 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/seq_file.h>
#include <video/omapfb_dss.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
index 705373e4cf38..4af6ba220744 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
@@ -14,6 +14,8 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
#include <video/omapfb_dss.h>
#include "dss.h"
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index cff773f15b7e..35897649c24f 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -5,7 +5,11 @@ config VIRTIO
bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_RPMSG
or CONFIG_S390_GUEST.
-menu "Virtio drivers"
+menuconfig VIRTIO_MENU
+ bool "Virtio drivers"
+ default y
+
+if VIRTIO_MENU
config VIRTIO_PCI
tristate "PCI driver for virtio devices"
@@ -79,4 +83,4 @@ config VIRTIO_MMIO_CMDLINE_DEVICES
If unsure, say 'N'.
-endmenu
+endif # VIRTIO_MENU
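
Switching from menu/endmenu to menuconfig/if lets the whole virtio driver group be toggled by one symbol (defaulting to y, so existing configurations are unaffected) while every entry inside the if block picks up an implicit dependency on VIRTIO_MENU.
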
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index bf7ff3934d7f..59e36ef4920f 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -303,11 +303,21 @@ void unregister_virtio_driver(struct virtio_driver *driver)
}
EXPORT_SYMBOL_GPL(unregister_virtio_driver);
+/**
+ * register_virtio_device - register virtio device
+ * @dev : virtio device to be registered
+ *
+ * On error, the caller must call put_device on &@dev->dev (and not kfree),
+ * as another code path may have obtained a reference to @dev.
+ *
+ * Returns: 0 on success, -error on failure
+ */
int register_virtio_device(struct virtio_device *dev)
{
int err;
dev->dev.bus = &virtio_bus;
+ device_initialize(&dev->dev);
/* Assign a unique device index and hence name. */
err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL);
@@ -330,9 +340,11 @@ int register_virtio_device(struct virtio_device *dev)
INIT_LIST_HEAD(&dev->vqs);
- /* device_register() causes the bus infrastructure to look for a
- * matching driver. */
- err = device_register(&dev->dev);
+ /*
+ * device_add() causes the bus infrastructure to look for a matching
+ * driver.
+ */
+ err = device_add(&dev->dev);
if (err)
ida_simple_remove(&virtio_index_ida, dev->index);
out:
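
Splitting device_register() into device_initialize() at function entry and device_add() at the end means the embedded struct device is refcounted for the whole of register_virtio_device(), which is what allows the new kernel-doc to require put_device() rather than kfree() on failure. A hedged sketch of the general initialize/add split, with placeholder names:

#include <linux/device.h>
#include <linux/slab.h>

struct example_device {		/* hypothetical wrapper, for illustration */
	struct device dev;
};

static void example_release(struct device *dev)
{
	/* Runs when the last reference is dropped. */
	kfree(container_of(dev, struct example_device, dev));
}

static int example_register(struct example_device *ed)
{
	device_initialize(&ed->dev);	/* refcount is live from here on */
	ed->dev.release = example_release;

	/* Any failure past this point must be unwound with
	 * put_device(&ed->dev), never kfree(ed). */
	return device_add(&ed->dev);	/* second half of device_register() */
}
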
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index a1fb52cb3f0a..dfe5684000be 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -257,11 +257,13 @@ static unsigned int update_balloon_stats(struct virtio_balloon *vb)
struct sysinfo i;
unsigned int idx = 0;
long available;
+ unsigned long caches;
all_vm_events(events);
si_meminfo(&i);
available = si_mem_available();
+ caches = global_node_page_state(NR_FILE_PAGES);
#ifdef CONFIG_VM_EVENT_COUNTERS
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
@@ -277,6 +279,8 @@ static unsigned int update_balloon_stats(struct virtio_balloon *vb)
pages_to_bytes(i.totalram));
update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
pages_to_bytes(available));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_CACHES,
+ pages_to_bytes(caches));
return idx;
}
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index c92131edfaba..67763d3c7abf 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -662,10 +662,8 @@ static int vm_cmdline_set(const char *device,
pdev = platform_device_register_resndata(&vm_cmdline_parent,
"virtio-mmio", vm_cmdline_id++,
resources, ARRAY_SIZE(resources), NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
- return 0;
+ return PTR_ERR_OR_ZERO(pdev);
}
static int vm_cmdline_get_device(struct device *dev, void *data)
@@ -725,7 +723,7 @@ static void vm_unregister_cmdline_devices(void)
/* Platform driver */
-static struct of_device_id virtio_mmio_match[] = {
+static const struct of_device_id virtio_mmio_match[] = {
{ .compatible = "virtio,mmio", },
{},
};
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 1c4797e53f68..48d4d1cf1cb6 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -513,7 +513,7 @@ static void virtio_pci_release_dev(struct device *_d)
static int virtio_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
- struct virtio_pci_device *vp_dev;
+ struct virtio_pci_device *vp_dev, *reg_dev = NULL;
int rc;
/* allocate our structure and fill it out */
@@ -551,6 +551,7 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
rc = register_virtio_device(&vp_dev->vdev);
+ reg_dev = vp_dev;
if (rc)
goto err_register;
@@ -564,7 +565,10 @@ err_register:
err_probe:
pci_disable_device(pci_dev);
err_enable_device:
- kfree(vp_dev);
+ if (reg_dev)
+ put_device(&vp_dev->vdev.dev);
+ else
+ kfree(vp_dev);
return rc;
}
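
virtio_pci_probe() shows the caller side of that contract: reg_dev records whether register_virtio_device() was reached, and the shared error path uses put_device() (which invokes the release callback) once it was, falling back to kfree() only for failures before the device was initialized. A trimmed sketch, with a hypothetical pre-registration step standing in for the earlier error labels:

static int example_probe(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	vp_dev = kzalloc(sizeof(*vp_dev), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	rc = example_setup(vp_dev);	/* hypothetical, fails pre-registration */
	if (rc)
		goto err;		/* not initialized yet: plain kfree() */

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;		/* device_initialize() has run by now */
	if (rc)
		goto err;		/* initialized: must use put_device() */
	return 0;

err:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}
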
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f45114fd8e1e..27be107d6480 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -382,7 +382,7 @@ static void gnttab_handle_deferred(struct timer_list *unused)
if (entry->page) {
pr_debug("freeing g.e. %#x (pfn %#lx)\n",
entry->ref, page_to_pfn(entry->page));
- __free_page(entry->page);
+ put_page(entry->page);
} else
pr_info("freeing g.e. %#x\n", entry->ref);
kfree(entry);
@@ -438,7 +438,7 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
if (gnttab_end_foreign_access_ref(ref, readonly)) {
put_free_entry(ref);
if (page != 0)
- free_page(page);
+ put_page(virt_to_page(page));
} else
gnttab_add_deferred(ref, readonly,
page ? virt_to_page(page) : NULL);
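
Both grant-table hunks replace unconditional freeing with put_page(), which only returns the page to the allocator once its reference count drops to zero; that is the safe choice here because a granted page may still be referenced elsewhere when the grant is torn down. A small sketch of the refcount semantics:

#include <linux/gfp.h>
#include <linux/mm.h>

static void example_page_refs(void)
{
	unsigned long addr = __get_free_page(GFP_KERNEL);
	struct page *page;

	if (!addr)
		return;
	page = virt_to_page(addr);	/* allocated with refcount 1 */

	get_page(page);		/* some other user takes a reference */
	put_page(page);		/* our reference: page survives, count is 1 */
	put_page(page);		/* last reference gone: page actually freed */
}
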
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index c7822d8078b9..156e5aea36db 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -548,7 +548,7 @@ static void __pvcalls_back_accept(struct work_struct *work)
ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
if (ret == -EAGAIN) {
sock_release(sock);
- goto out_error;
+ return;
}
map = pvcalls_new_active_socket(fedata,
diff --git a/fs/afs/addr_list.c b/fs/afs/addr_list.c
index a537368ba0db..fd9f28b8a933 100644
--- a/fs/afs/addr_list.c
+++ b/fs/afs/addr_list.c
@@ -332,11 +332,18 @@ bool afs_iterate_addresses(struct afs_addr_cursor *ac)
*/
int afs_end_cursor(struct afs_addr_cursor *ac)
{
- if (ac->responded && ac->index != ac->start)
- WRITE_ONCE(ac->alist->index, ac->index);
+ struct afs_addr_list *alist;
+
+ alist = ac->alist;
+ if (alist) {
+ if (ac->responded && ac->index != ac->start)
+ WRITE_ONCE(alist->index, ac->index);
+ afs_put_addrlist(alist);
+ }
- afs_put_addrlist(ac->alist);
+ ac->addr = NULL;
ac->alist = NULL;
+ ac->begun = false;
return ac->error;
}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 23c7f395d718..ba2b458b36d1 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -17,10 +17,13 @@
#include <linux/pagemap.h>
#include <linux/ctype.h>
#include <linux/sched.h>
+#include <linux/dns_resolver.h>
#include "internal.h"
static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
+static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags);
static int afs_dir_open(struct inode *inode, struct file *file);
static int afs_readdir(struct file *file, struct dir_context *ctx);
static int afs_d_revalidate(struct dentry *dentry, unsigned int flags);
@@ -64,6 +67,17 @@ const struct inode_operations afs_dir_inode_operations = {
.listxattr = afs_listxattr,
};
+const struct file_operations afs_dynroot_file_operations = {
+ .open = dcache_dir_open,
+ .release = dcache_dir_close,
+ .iterate_shared = dcache_readdir,
+ .llseek = dcache_dir_lseek,
+};
+
+const struct inode_operations afs_dynroot_inode_operations = {
+ .lookup = afs_dynroot_lookup,
+};
+
const struct dentry_operations afs_fs_dentry_operations = {
.d_revalidate = afs_d_revalidate,
.d_delete = afs_d_delete,
@@ -468,25 +482,58 @@ static int afs_do_lookup(struct inode *dir, struct dentry *dentry,
}
/*
+ * Probe to see if a cell may exist. This prevents positive dentries from
+ * being created unnecessarily.
+ */
+static int afs_probe_cell_name(struct dentry *dentry)
+{
+ struct afs_cell *cell;
+ const char *name = dentry->d_name.name;
+ size_t len = dentry->d_name.len;
+ int ret;
+
+ /* Names prefixed with a dot are R/W mounts. */
+ if (name[0] == '.') {
+ if (len == 1)
+ return -EINVAL;
+ name++;
+ len--;
+ }
+
+ cell = afs_lookup_cell_rcu(afs_d2net(dentry), name, len);
+ if (!IS_ERR(cell)) {
+ afs_put_cell(afs_d2net(dentry), cell);
+ return 0;
+ }
+
+ ret = dns_query("afsdb", name, len, "ipv4", NULL, NULL);
+ if (ret == -ENODATA)
+ ret = -EDESTADDRREQ;
+ return ret;
+}
+
+/*
* Try to auto mount the mountpoint with pseudo directory, if the autocell
 * operation is set.
*/
-static struct inode *afs_try_auto_mntpt(
- int ret, struct dentry *dentry, struct inode *dir, struct key *key,
- struct afs_fid *fid)
+static struct inode *afs_try_auto_mntpt(struct dentry *dentry,
+ struct inode *dir, struct afs_fid *fid)
{
- const char *devname = dentry->d_name.name;
struct afs_vnode *vnode = AFS_FS_I(dir);
struct inode *inode;
+ int ret = -ENOENT;
- _enter("%d, %p{%pd}, {%x:%u}, %p",
- ret, dentry, dentry, vnode->fid.vid, vnode->fid.vnode, key);
+ _enter("%p{%pd}, {%x:%u}",
+ dentry, dentry, vnode->fid.vid, vnode->fid.vnode);
+
+ if (!test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
+ goto out;
- if (ret != -ENOENT ||
- !test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
+ ret = afs_probe_cell_name(dentry);
+ if (ret < 0)
goto out;
- inode = afs_iget_autocell(dir, devname, strlen(devname), key);
+ inode = afs_iget_pseudo_dir(dir->i_sb, false);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
goto out;
@@ -545,13 +592,16 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
ret = afs_do_lookup(dir, dentry, &fid, key);
if (ret < 0) {
- inode = afs_try_auto_mntpt(ret, dentry, dir, key, &fid);
- if (!IS_ERR(inode)) {
- key_put(key);
- goto success;
+ if (ret == -ENOENT) {
+ inode = afs_try_auto_mntpt(dentry, dir, &fid);
+ if (!IS_ERR(inode)) {
+ key_put(key);
+ goto success;
+ }
+
+ ret = PTR_ERR(inode);
}
- ret = PTR_ERR(inode);
key_put(key);
if (ret == -ENOENT) {
d_add(dentry, NULL);
@@ -583,12 +633,53 @@ success:
}
/*
+ * Look up an entry in a dynroot directory.
+ */
+static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct afs_vnode *vnode;
+ struct afs_fid fid;
+ struct inode *inode;
+ int ret;
+
+ vnode = AFS_FS_I(dir);
+
+ _enter("%pd", dentry);
+
+ ASSERTCMP(d_inode(dentry), ==, NULL);
+
+ if (dentry->d_name.len >= AFSNAMEMAX) {
+ _leave(" = -ENAMETOOLONG");
+ return ERR_PTR(-ENAMETOOLONG);
+ }
+
+ inode = afs_try_auto_mntpt(dentry, dir, &fid);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ if (ret == -ENOENT) {
+ d_add(dentry, NULL);
+ _leave(" = NULL [negative]");
+ return NULL;
+ }
+ _leave(" = %d [do]", ret);
+ return ERR_PTR(ret);
+ }
+
+ d_add(dentry, inode);
+ _leave(" = 0 { ino=%lu v=%u }",
+ d_inode(dentry)->i_ino, d_inode(dentry)->i_generation);
+ return NULL;
+}
+
+/*
* check that a dentry lookup hit has found a valid entry
* - NOTE! the hit can be a negative hit too, so we can't assume we have an
* inode
*/
static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
+ struct afs_super_info *as = dentry->d_sb->s_fs_info;
struct afs_vnode *vnode, *dir;
struct afs_fid uninitialized_var(fid);
struct dentry *parent;
@@ -600,6 +691,9 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
+ if (as->dyn_root)
+ return 1;
+
if (d_really_is_positive(dentry)) {
vnode = AFS_FS_I(d_inode(dentry));
_enter("{v={%x:%u} n=%pd fl=%lx},",
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index c7f17c44c7ce..6b39d0255b72 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -147,7 +147,7 @@ int afs_iget5_test(struct inode *inode, void *opaque)
*
* These pseudo inodes don't match anything.
*/
-static int afs_iget5_autocell_test(struct inode *inode, void *opaque)
+static int afs_iget5_pseudo_dir_test(struct inode *inode, void *opaque)
{
return 0;
}
@@ -169,31 +169,34 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
}
/*
- * inode retrieval for autocell
+ * Create an inode for a dynamic root directory or an autocell dynamic
+ * automount dir.
*/
-struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
- int namesz, struct key *key)
+struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
{
struct afs_iget_data data;
struct afs_super_info *as;
struct afs_vnode *vnode;
- struct super_block *sb;
struct inode *inode;
static atomic_t afs_autocell_ino;
- _enter("{%x:%u},%*.*s,",
- AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
- namesz, namesz, dev_name ?: "");
+ _enter("");
- sb = dir->i_sb;
as = sb->s_fs_info;
- data.volume = as->volume;
- data.fid.vid = as->volume->vid;
- data.fid.unique = 0;
- data.fid.vnode = 0;
+ if (as->volume) {
+ data.volume = as->volume;
+ data.fid.vid = as->volume->vid;
+ }
+ if (root) {
+ data.fid.vnode = 1;
+ data.fid.unique = 1;
+ } else {
+ data.fid.vnode = atomic_inc_return(&afs_autocell_ino);
+ data.fid.unique = 0;
+ }
- inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
- afs_iget5_autocell_test, afs_iget5_set,
+ inode = iget5_locked(sb, data.fid.vnode,
+ afs_iget5_pseudo_dir_test, afs_iget5_set,
&data);
if (!inode) {
_leave(" = -ENOMEM");
@@ -211,7 +214,12 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
inode->i_size = 0;
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
- inode->i_op = &afs_autocell_inode_operations;
+ if (root) {
+ inode->i_op = &afs_dynroot_inode_operations;
+ inode->i_fop = &afs_dynroot_file_operations;
+ } else {
+ inode->i_op = &afs_autocell_inode_operations;
+ }
set_nlink(inode, 2);
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
@@ -223,8 +231,12 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
inode->i_generation = 0;
set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
- set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
- inode->i_flags |= S_AUTOMOUNT | S_NOATIME;
+ if (!root) {
+ set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
+ inode->i_flags |= S_AUTOMOUNT;
+ }
+
+ inode->i_flags |= S_NOATIME;
unlock_new_inode(inode);
_leave(" = %p", inode);
return inode;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 804d1f905622..f38d6a561a84 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -36,6 +36,7 @@ struct afs_mount_params {
bool rwpath; /* T if the parent should be considered R/W */
bool force; /* T to force cell type */
bool autocell; /* T if set auto mount operation */
+ bool dyn_root; /* T if dynamic root */
afs_voltype_t type; /* type of volume requested */
int volnamesz; /* size of volume name */
const char *volname; /* name of volume to mount */
@@ -186,6 +187,7 @@ struct afs_super_info {
struct afs_net *net; /* Network namespace */
struct afs_cell *cell; /* The cell in which the volume resides */
struct afs_volume *volume; /* volume record */
+ bool dyn_root; /* True if dynamic root */
};
static inline struct afs_super_info *AFS_FS_S(struct super_block *sb)
@@ -634,10 +636,13 @@ extern bool afs_cm_incoming_call(struct afs_call *);
/*
* dir.c
*/
-extern bool afs_dir_check_page(struct inode *, struct page *);
+extern const struct file_operations afs_dir_file_operations;
extern const struct inode_operations afs_dir_inode_operations;
+extern const struct file_operations afs_dynroot_file_operations;
+extern const struct inode_operations afs_dynroot_inode_operations;
extern const struct dentry_operations afs_fs_dentry_operations;
-extern const struct file_operations afs_dir_file_operations;
+
+extern bool afs_dir_check_page(struct inode *, struct page *);
/*
* file.c
@@ -695,8 +700,7 @@ extern int afs_fs_get_capabilities(struct afs_net *, struct afs_server *,
*/
extern int afs_fetch_status(struct afs_vnode *, struct key *);
extern int afs_iget5_test(struct inode *, void *);
-extern struct inode *afs_iget_autocell(struct inode *, const char *, int,
- struct key *);
+extern struct inode *afs_iget_pseudo_dir(struct super_block *, bool);
extern struct inode *afs_iget(struct super_block *, struct key *,
struct afs_fid *, struct afs_file_status *,
struct afs_callback *,
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 690fea9d84c3..99fd13500a97 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -72,7 +72,7 @@ static int afs_mntpt_open(struct inode *inode, struct file *file)
*/
static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
{
- struct afs_super_info *super;
+ struct afs_super_info *as;
struct vfsmount *mnt;
struct afs_vnode *vnode;
struct page *page;
@@ -104,13 +104,13 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
goto error_no_page;
if (mntpt->d_name.name[0] == '.') {
- devname[0] = '#';
- memcpy(devname + 1, mntpt->d_name.name, size - 1);
+ devname[0] = '%';
+ memcpy(devname + 1, mntpt->d_name.name + 1, size - 1);
memcpy(devname + size, afs_root_cell,
sizeof(afs_root_cell));
rwpath = true;
} else {
- devname[0] = '%';
+ devname[0] = '#';
memcpy(devname + 1, mntpt->d_name.name, size);
memcpy(devname + size + 1, afs_root_cell,
sizeof(afs_root_cell));
@@ -142,11 +142,13 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
}
/* work out what options we want */
- super = AFS_FS_S(mntpt->d_sb);
- memcpy(options, "cell=", 5);
- strcpy(options + 5, super->volume->cell->name);
- if (super->volume->type == AFSVL_RWVOL || rwpath)
- strcat(options, ",rwpath");
+ as = AFS_FS_S(mntpt->d_sb);
+ if (as->cell) {
+ memcpy(options, "cell=", 5);
+ strcpy(options + 5, as->cell->name);
+ if ((as->volume && as->volume->type == AFSVL_RWVOL) || rwpath)
+ strcat(options, ",rwpath");
+ }
/* try and do the mount */
_debug("--- attempting mount %s -o %s ---", devname, options);
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index d04511fb3879..ad1328d85526 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -330,26 +330,6 @@ start:
if (!afs_start_fs_iteration(fc, vnode))
goto failed;
- goto use_server;
-
-next_server:
- _debug("next");
- afs_put_cb_interest(afs_v2net(vnode), fc->cbi);
- fc->cbi = NULL;
- fc->index++;
- if (fc->index >= fc->server_list->nr_servers)
- fc->index = 0;
- if (fc->index != fc->start)
- goto use_server;
-
- /* That's all the servers poked to no good effect. Try again if some
- * of them were busy.
- */
- if (fc->flags & AFS_FS_CURSOR_VBUSY)
- goto restart_from_beginning;
-
- fc->ac.error = -EDESTADDRREQ;
- goto failed;
use_server:
_debug("use");
@@ -383,6 +363,7 @@ use_server:
afs_get_addrlist(alist);
read_unlock(&server->fs_lock);
+ memset(&fc->ac, 0, sizeof(fc->ac));
/* Probe the current fileserver if we haven't done so yet. */
if (!test_bit(AFS_SERVER_FL_PROBED, &server->flags)) {
@@ -397,12 +378,8 @@ use_server:
else
afs_put_addrlist(alist);
- fc->ac.addr = NULL;
fc->ac.start = READ_ONCE(alist->index);
fc->ac.index = fc->ac.start;
- fc->ac.error = 0;
- fc->ac.begun = false;
- goto iterate_address;
iterate_address:
ASSERT(fc->ac.alist);
@@ -410,16 +387,35 @@ iterate_address:
/* Iterate over the current server's address list to try and find an
* address on which it will respond to us.
*/
- if (afs_iterate_addresses(&fc->ac)) {
- _leave(" = t");
- return true;
- }
+ if (!afs_iterate_addresses(&fc->ac))
+ goto next_server;
+
+ _leave(" = t");
+ return true;
+next_server:
+ _debug("next");
afs_end_cursor(&fc->ac);
- goto next_server;
+ afs_put_cb_interest(afs_v2net(vnode), fc->cbi);
+ fc->cbi = NULL;
+ fc->index++;
+ if (fc->index >= fc->server_list->nr_servers)
+ fc->index = 0;
+ if (fc->index != fc->start)
+ goto use_server;
+
+ /* That's all the servers poked to no good effect. Try again if some
+ * of them were busy.
+ */
+ if (fc->flags & AFS_FS_CURSOR_VBUSY)
+ goto restart_from_beginning;
+
+ fc->ac.error = -EDESTADDRREQ;
+ goto failed;
failed:
fc->flags |= AFS_FS_CURSOR_STOP;
+ afs_end_cursor(&fc->ac);
_leave(" = f [failed %d]", fc->ac.error);
return false;
}
@@ -458,12 +454,10 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
return false;
}
+ memset(&fc->ac, 0, sizeof(fc->ac));
fc->ac.alist = alist;
- fc->ac.addr = NULL;
fc->ac.start = READ_ONCE(alist->index);
fc->ac.index = fc->ac.start;
- fc->ac.error = 0;
- fc->ac.begun = false;
goto iterate_address;
case 0:
@@ -520,238 +514,3 @@ int afs_end_vnode_operation(struct afs_fs_cursor *fc)
return fc->ac.error;
}
-
-#if 0
-/*
- * Set a filesystem server cursor for using a specific FS server.
- */
-int afs_set_fs_cursor(struct afs_fs_cursor *fc, struct afs_vnode *vnode)
-{
- afs_init_fs_cursor(fc, vnode);
-
- read_seqlock_excl(&vnode->cb_lock);
- if (vnode->cb_interest) {
- if (vnode->cb_interest->server->fs_state == 0)
- fc->server = afs_get_server(vnode->cb_interest->server);
- else
- fc->ac.error = vnode->cb_interest->server->fs_state;
- } else {
- fc->ac.error = -ESTALE;
- }
- read_sequnlock_excl(&vnode->cb_lock);
-
- return fc->ac.error;
-}
-
-/*
- * pick a server to use to try accessing this volume
- * - returns with an elevated usage count on the server chosen
- */
-bool afs_volume_pick_fileserver(struct afs_fs_cursor *fc, struct afs_vnode *vnode)
-{
- struct afs_volume *volume = vnode->volume;
- struct afs_server *server;
- int ret, state, loop;
-
- _enter("%s", volume->vlocation->vldb.name);
-
- /* stick with the server we're already using if we can */
- if (vnode->cb_interest && vnode->cb_interest->server->fs_state == 0) {
- fc->server = afs_get_server(vnode->cb_interest->server);
- goto set_server;
- }
-
- down_read(&volume->server_sem);
-
- /* handle the no-server case */
- if (volume->nservers == 0) {
- fc->ac.error = volume->rjservers ? -ENOMEDIUM : -ESTALE;
- up_read(&volume->server_sem);
- _leave(" = f [no servers %d]", fc->ac.error);
- return false;
- }
-
- /* basically, just search the list for the first live server and use
- * that */
- ret = 0;
- for (loop = 0; loop < volume->nservers; loop++) {
- server = volume->servers[loop];
- state = server->fs_state;
-
- _debug("consider %d [%d]", loop, state);
-
- switch (state) {
- case 0:
- goto picked_server;
-
- case -ENETUNREACH:
- if (ret == 0)
- ret = state;
- break;
-
- case -EHOSTUNREACH:
- if (ret == 0 ||
- ret == -ENETUNREACH)
- ret = state;
- break;
-
- case -ECONNREFUSED:
- if (ret == 0 ||
- ret == -ENETUNREACH ||
- ret == -EHOSTUNREACH)
- ret = state;
- break;
-
- default:
- case -EREMOTEIO:
- if (ret == 0 ||
- ret == -ENETUNREACH ||
- ret == -EHOSTUNREACH ||
- ret == -ECONNREFUSED)
- ret = state;
- break;
- }
- }
-
-error:
- fc->ac.error = ret;
-
- /* no available servers
- * - TODO: handle the no active servers case better
- */
- up_read(&volume->server_sem);
- _leave(" = f [%d]", fc->ac.error);
- return false;
-
-picked_server:
- /* Found an apparently healthy server. We need to register an interest
- * in receiving callbacks before we talk to it.
- */
- ret = afs_register_server_cb_interest(vnode,
- &volume->cb_interests[loop], server);
- if (ret < 0)
- goto error;
-
- fc->server = afs_get_server(server);
- up_read(&volume->server_sem);
-set_server:
- fc->ac.alist = afs_get_addrlist(fc->server->addrs);
- fc->ac.addr = &fc->ac.alist->addrs[0];
- _debug("USING SERVER: %pIS\n", &fc->ac.addr->transport);
- _leave(" = t (picked %pIS)", &fc->ac.addr->transport);
- return true;
-}
-
-/*
- * release a server after use
- * - releases the ref on the server struct that was acquired by picking
- * - records result of using a particular server to access a volume
- * - return true to try again, false if okay or to issue error
- * - the caller must release the server struct if result was false
- */
-bool afs_iterate_fs_cursor(struct afs_fs_cursor *fc,
- struct afs_vnode *vnode)
-{
- struct afs_volume *volume = vnode->volume;
- struct afs_server *server = fc->server;
- unsigned loop;
-
- _enter("%s,%pIS,%d",
- volume->vlocation->vldb.name, &fc->ac.addr->transport,
- fc->ac.error);
-
- switch (fc->ac.error) {
- /* success */
- case 0:
- server->fs_state = 0;
- _leave(" = f");
- return false;
-
- /* the fileserver denied all knowledge of the volume */
- case -ENOMEDIUM:
- down_write(&volume->server_sem);
-
- /* firstly, find where the server is in the active list (if it
- * is) */
- for (loop = 0; loop < volume->nservers; loop++)
- if (volume->servers[loop] == server)
- goto present;
-
- /* no longer there - may have been discarded by another op */
- goto try_next_server_upw;
-
- present:
- volume->nservers--;
- memmove(&volume->servers[loop],
- &volume->servers[loop + 1],
- sizeof(volume->servers[loop]) *
- (volume->nservers - loop));
- volume->servers[volume->nservers] = NULL;
- afs_put_server(afs_v2net(vnode), server);
- volume->rjservers++;
-
- if (volume->nservers > 0)
- /* another server might acknowledge its existence */
- goto try_next_server_upw;
-
- /* handle the case where all the fileservers have rejected the
- * volume
- * - TODO: try asking the fileservers for volume information
- * - TODO: contact the VL server again to see if the volume is
- * no longer registered
- */
- up_write(&volume->server_sem);
- afs_put_server(afs_v2net(vnode), server);
- fc->server = NULL;
- _leave(" = f [completely rejected]");
- return false;
-
- /* problem reaching the server */
- case -ENETUNREACH:
- case -EHOSTUNREACH:
- case -ECONNREFUSED:
- case -ETIME:
- case -ETIMEDOUT:
- case -EREMOTEIO:
- /* mark the server as dead
- * TODO: vary dead timeout depending on error
- */
- spin_lock(&server->fs_lock);
- if (!server->fs_state) {
- server->fs_state = fc->ac.error;
- printk("kAFS: SERVER DEAD state=%d\n", fc->ac.error);
- }
- spin_unlock(&server->fs_lock);
- goto try_next_server;
-
- /* miscellaneous error */
- default:
- case -ENOMEM:
- case -ENONET:
- /* tell the caller to accept the result */
- afs_put_server(afs_v2net(vnode), server);
- fc->server = NULL;
- _leave(" = f [local failure]");
- return false;
- }
-
- /* tell the caller to loop around and try the next server */
-try_next_server_upw:
- up_write(&volume->server_sem);
-try_next_server:
- afs_put_server(afs_v2net(vnode), server);
- _leave(" = t [try next server]");
- return true;
-}
-
-/*
- * Clean up a fileserver cursor.
- */
-int afs_end_fs_cursor(struct afs_fs_cursor *fc, struct afs_net *net)
-{
- afs_end_cursor(&fc->ac);
- afs_put_server(net, fc->server);
- return fc->ac.error;
-}
-
-#endif
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 0ab3f8457839..0f8dc4c8f07c 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -58,7 +58,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
server = afs_lookup_server(cell, key, &vldb->fs_server[i]);
if (IS_ERR(server)) {
ret = PTR_ERR(server);
- if (ret == -ENOENT)
+ if (ret == -ENOENT ||
+ ret == -ENOMEDIUM)
continue;
goto error_2;
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 1037dd41a622..3623c952b6ff 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -64,6 +64,7 @@ static atomic_t afs_count_active_inodes;
enum {
afs_no_opt,
afs_opt_cell,
+ afs_opt_dyn,
afs_opt_rwpath,
afs_opt_vol,
afs_opt_autocell,
@@ -71,6 +72,7 @@ enum {
static const match_table_t afs_options_list = {
{ afs_opt_cell, "cell=%s" },
+ { afs_opt_dyn, "dyn" },
{ afs_opt_rwpath, "rwpath" },
{ afs_opt_vol, "vol=%s" },
{ afs_opt_autocell, "autocell" },
@@ -148,6 +150,11 @@ static int afs_show_devname(struct seq_file *m, struct dentry *root)
const char *suf = "";
char pref = '%';
+ if (as->dyn_root) {
+ seq_puts(m, "none");
+ return 0;
+ }
+
switch (volume->type) {
case AFSVL_RWVOL:
break;
@@ -171,8 +178,12 @@ static int afs_show_devname(struct seq_file *m, struct dentry *root)
*/
static int afs_show_options(struct seq_file *m, struct dentry *root)
{
+ struct afs_super_info *as = AFS_FS_S(root->d_sb);
+
+ if (as->dyn_root)
+ seq_puts(m, ",dyn");
if (test_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(d_inode(root))->flags))
- seq_puts(m, "autocell");
+ seq_puts(m, ",autocell");
return 0;
}
@@ -212,7 +223,7 @@ static int afs_parse_options(struct afs_mount_params *params,
break;
case afs_opt_rwpath:
- params->rwpath = 1;
+ params->rwpath = true;
break;
case afs_opt_vol:
@@ -220,7 +231,11 @@ static int afs_parse_options(struct afs_mount_params *params,
break;
case afs_opt_autocell:
- params->autocell = 1;
+ params->autocell = true;
+ break;
+
+ case afs_opt_dyn:
+ params->dyn_root = true;
break;
default:
@@ -254,7 +269,7 @@ static int afs_parse_device_name(struct afs_mount_params *params,
int cellnamesz;
_enter(",%s", name);
-
+
if (!name) {
printk(KERN_ERR "kAFS: no volume name specified\n");
return -EINVAL;
@@ -336,7 +351,14 @@ static int afs_test_super(struct super_block *sb, void *data)
struct afs_super_info *as1 = data;
struct afs_super_info *as = AFS_FS_S(sb);
- return as->net == as1->net && as->volume->vid == as1->volume->vid;
+ return (as->net == as1->net &&
+ as->volume &&
+ as->volume->vid == as1->volume->vid);
+}
+
+static int afs_dynroot_test_super(struct super_block *sb, void *data)
+{
+ return false;
}
static int afs_set_super(struct super_block *sb, void *data)
@@ -365,24 +387,30 @@ static int afs_fill_super(struct super_block *sb,
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = AFS_FS_MAGIC;
sb->s_op = &afs_super_ops;
- sb->s_xattr = afs_xattr_handlers;
+ if (!as->dyn_root)
+ sb->s_xattr = afs_xattr_handlers;
ret = super_setup_bdi(sb);
if (ret)
return ret;
sb->s_bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
- sprintf(sb->s_id, "%u", as->volume->vid);
-
- afs_activate_volume(as->volume);
/* allocate the root inode and dentry */
- fid.vid = as->volume->vid;
- fid.vnode = 1;
- fid.unique = 1;
- inode = afs_iget(sb, params->key, &fid, NULL, NULL, NULL);
+ if (as->dyn_root) {
+ inode = afs_iget_pseudo_dir(sb, true);
+ sb->s_flags |= SB_RDONLY;
+ } else {
+ sprintf(sb->s_id, "%u", as->volume->vid);
+ afs_activate_volume(as->volume);
+ fid.vid = as->volume->vid;
+ fid.vnode = 1;
+ fid.unique = 1;
+ inode = afs_iget(sb, params->key, &fid, NULL, NULL, NULL);
+ }
+
if (IS_ERR(inode))
return PTR_ERR(inode);
- if (params->autocell)
+ if (params->autocell || params->dyn_root)
set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
ret = -ENOMEM;
@@ -407,7 +435,10 @@ static struct afs_super_info *afs_alloc_sbi(struct afs_mount_params *params)
as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
if (as) {
as->net = afs_get_net(params->net);
- as->cell = afs_get_cell(params->cell);
+ if (params->dyn_root)
+ as->dyn_root = true;
+ else
+ as->cell = afs_get_cell(params->cell);
}
return as;
}
@@ -451,18 +482,20 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
goto error;
}
- ret = afs_parse_device_name(&params, dev_name);
- if (ret < 0)
- goto error;
+ if (!params.dyn_root) {
+ ret = afs_parse_device_name(&params, dev_name);
+ if (ret < 0)
+ goto error;
- /* try and do the mount securely */
- key = afs_request_key(params.cell);
- if (IS_ERR(key)) {
- _leave(" = %ld [key]", PTR_ERR(key));
- ret = PTR_ERR(key);
- goto error;
+ /* try and do the mount securely */
+ key = afs_request_key(params.cell);
+ if (IS_ERR(key)) {
+ _leave(" = %ld [key]", PTR_ERR(key));
+ ret = PTR_ERR(key);
+ goto error;
+ }
+ params.key = key;
}
- params.key = key;
/* allocate a superblock info record */
ret = -ENOMEM;
@@ -470,20 +503,25 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
if (!as)
goto error_key;
- /* Assume we're going to need a volume record; at the very least we can
- * use it to update the volume record if we have one already. This
- * checks that the volume exists within the cell.
- */
- candidate = afs_create_volume(&params);
- if (IS_ERR(candidate)) {
- ret = PTR_ERR(candidate);
- goto error_as;
- }
+ if (!params.dyn_root) {
+ /* Assume we're going to need a volume record; at the very
+ * least we can use it to update the volume record if we have
+ * one already. This checks that the volume exists within the
+ * cell.
+ */
+ candidate = afs_create_volume(&params);
+ if (IS_ERR(candidate)) {
+ ret = PTR_ERR(candidate);
+ goto error_as;
+ }
- as->volume = candidate;
+ as->volume = candidate;
+ }
/* allocate a deviceless superblock */
- sb = sget(fs_type, afs_test_super, afs_set_super, flags, as);
+ sb = sget(fs_type,
+ as->dyn_root ? afs_dynroot_test_super : afs_test_super,
+ afs_set_super, flags, as);
if (IS_ERR(sb)) {
ret = PTR_ERR(sb);
goto error_as;
@@ -529,9 +567,11 @@ static void afs_kill_super(struct super_block *sb)
/* Clear the callback interests (which will do ilookup5) before
* deactivating the superblock.
*/
- afs_clear_callback_interests(as->net, as->volume->servers);
+ if (as->volume)
+ afs_clear_callback_interests(as->net, as->volume->servers);
kill_anon_super(sb);
- afs_deactivate_volume(as->volume);
+ if (as->volume)
+ afs_deactivate_volume(as->volume);
afs_destroy_sbi(as);
}
@@ -619,12 +659,24 @@ static void afs_destroy_inode(struct inode *inode)
*/
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
+ struct afs_super_info *as = AFS_FS_S(dentry->d_sb);
struct afs_fs_cursor fc;
struct afs_volume_status vs;
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
struct key *key;
int ret;
+ buf->f_type = dentry->d_sb->s_magic;
+ buf->f_bsize = AFS_BLOCK_SIZE;
+ buf->f_namelen = AFSNAMEMAX - 1;
+
+ if (as->dyn_root) {
+ buf->f_blocks = 1;
+ buf->f_bavail = 0;
+ buf->f_bfree = 0;
+ return 0;
+ }
+
key = afs_request_key(vnode->volume->cell);
if (IS_ERR(key))
return PTR_ERR(key);
@@ -645,10 +697,6 @@ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
key_put(key);
if (ret == 0) {
- buf->f_type = dentry->d_sb->s_magic;
- buf->f_bsize = AFS_BLOCK_SIZE;
- buf->f_namelen = AFSNAMEMAX - 1;
-
if (vs.max_quota == 0)
buf->f_blocks = vs.part_max_blocks;
else
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index e372f89fd36a..5d8562f1ad4a 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -23,7 +23,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
struct afs_uvldbentry__xdr *uvldb;
struct afs_vldb_entry *entry;
bool new_only = false;
- u32 tmp;
+ u32 tmp, nr_servers;
int i, ret;
_enter("");
@@ -36,6 +36,10 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
uvldb = call->buffer;
entry = call->reply[0];
+ nr_servers = ntohl(uvldb->nServers);
+ if (nr_servers > AFS_NMAXNSERVERS)
+ nr_servers = AFS_NMAXNSERVERS;
+
for (i = 0; i < ARRAY_SIZE(uvldb->name) - 1; i++)
entry->name[i] = (u8)ntohl(uvldb->name[i]);
entry->name[i] = 0;
@@ -44,14 +48,14 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
/* If there is a new replication site that we can use, ignore all the
* sites that aren't marked as new.
*/
- for (i = 0; i < AFS_NMAXNSERVERS; i++) {
+ for (i = 0; i < nr_servers; i++) {
tmp = ntohl(uvldb->serverFlags[i]);
if (!(tmp & AFS_VLSF_DONTUSE) &&
(tmp & AFS_VLSF_NEWREPSITE))
new_only = true;
}
- for (i = 0; i < AFS_NMAXNSERVERS; i++) {
+ for (i = 0; i < nr_servers; i++) {
struct afs_uuid__xdr *xdr;
struct afs_uuid *uuid;
int j;
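
The vlclient.c change bounds a server count taken off the wire before using it as a loop limit, so a malformed or hostile VL server reply can no longer drive the loops past the entry's fixed-size per-server arrays. The same clamp can be written with the min_t() helper:

/* Equivalent clamp; uvldb is the decoded wire record as above. */
nr_servers = min_t(u32, ntohl(uvldb->nServers), AFS_NMAXNSERVERS);
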
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 684c48293353..b517a588781f 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -26,9 +26,8 @@ static struct afs_volume *afs_alloc_volume(struct afs_mount_params *params,
unsigned long type_mask)
{
struct afs_server_list *slist;
- struct afs_server *server;
struct afs_volume *volume;
- int ret = -ENOMEM, nr_servers = 0, i, j;
+ int ret = -ENOMEM, nr_servers = 0, i;
for (i = 0; i < vldb->nr_servers; i++)
if (vldb->fs_mask[i] & type_mask)
@@ -58,50 +57,10 @@ static struct afs_volume *afs_alloc_volume(struct afs_mount_params *params,
refcount_set(&slist->usage, 1);
volume->servers = slist;
-
- /* Make sure a records exists for each server this volume occupies. */
- for (i = 0; i < nr_servers; i++) {
- if (!(vldb->fs_mask[i] & type_mask))
- continue;
-
- server = afs_lookup_server(params->cell, params->key,
- &vldb->fs_server[i]);
- if (IS_ERR(server)) {
- ret = PTR_ERR(server);
- if (ret == -ENOENT)
- continue;
- goto error_2;
- }
-
- /* Insertion-sort by server pointer */
- for (j = 0; j < slist->nr_servers; j++)
- if (slist->servers[j].server >= server)
- break;
- if (j < slist->nr_servers) {
- if (slist->servers[j].server == server) {
- afs_put_server(params->net, server);
- continue;
- }
-
- memmove(slist->servers + j + 1,
- slist->servers + j,
- (slist->nr_servers - j) * sizeof(struct afs_server_entry));
- }
-
- slist->servers[j].server = server;
- slist->nr_servers++;
- }
-
- if (slist->nr_servers == 0) {
- ret = -EDESTADDRREQ;
- goto error_2;
- }
-
return volume;
-error_2:
- afs_put_serverlist(params->net, slist);
error_1:
+ afs_put_cell(params->net, volume->cell);
kfree(volume);
error_0:
return ERR_PTR(ret);
@@ -327,7 +286,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
/* See if the volume's server list got updated. */
new = afs_alloc_server_list(volume->cell, key,
- vldb, (1 << volume->type));
+ vldb, (1 << volume->type));
if (IS_ERR(new)) {
ret = PTR_ERR(new);
goto error_vldb;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index dbf07051aacd..b4336b42ce3b 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -299,7 +299,8 @@ unlock:
* start an async read(ahead) operation. return nr_pages we submitted
* a read for on success, or negative error code.
*/
-static int start_read(struct inode *inode, struct list_head *page_list, int max)
+static int start_read(struct inode *inode, struct ceph_rw_context *rw_ctx,
+ struct list_head *page_list, int max)
{
struct ceph_osd_client *osdc =
&ceph_inode_to_client(inode)->client->osdc;
@@ -316,7 +317,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
int got = 0;
int ret = 0;
- if (!current->journal_info) {
+ if (!rw_ctx) {
/* caller of readpages does not hold buffer and read caps
* (fadvise, madvise and readahead cases) */
int want = CEPH_CAP_FILE_CACHE;
@@ -437,6 +438,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
{
struct inode *inode = file_inode(file);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_file_info *ci = file->private_data;
+ struct ceph_rw_context *rw_ctx;
int rc = 0;
int max = 0;
@@ -449,11 +452,12 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
if (rc == 0)
goto out;
+ rw_ctx = ceph_find_rw_context(ci);
max = fsc->mount_options->rsize >> PAGE_SHIFT;
- dout("readpages %p file %p nr_pages %d max %d\n",
- inode, file, nr_pages, max);
+ dout("readpages %p file %p ctx %p nr_pages %d max %d\n",
+ inode, file, rw_ctx, nr_pages, max);
while (!list_empty(page_list)) {
- rc = start_read(inode, page_list, max);
+ rc = start_read(inode, rw_ctx, page_list, max);
if (rc < 0)
goto out;
}
@@ -574,7 +578,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
struct ceph_fs_client *fsc;
struct ceph_snap_context *snapc, *oldest;
loff_t page_off = page_offset(page);
- long writeback_stat;
int err, len = PAGE_SIZE;
struct ceph_writeback_ctl ceph_wbc;
@@ -615,8 +618,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
dout("writepage %p page %p index %lu on %llu~%u snapc %p seq %lld\n",
inode, page, page->index, page_off, len, snapc, snapc->seq);
- writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
- if (writeback_stat >
+ if (atomic_long_inc_return(&fsc->writeback_count) >
CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
@@ -651,6 +653,11 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
end_page_writeback(page);
ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
ceph_put_snap_context(snapc); /* page's reference */
+
+ if (atomic_long_dec_return(&fsc->writeback_count) <
+ CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
+ clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
+
return err;
}
@@ -1450,9 +1457,10 @@ static int ceph_filemap_fault(struct vm_fault *vmf)
if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
ci->i_inline_version == CEPH_INLINE_NONE) {
- current->journal_info = vma->vm_file;
+ CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
+ ceph_add_rw_context(fi, &rw_ctx);
ret = filemap_fault(vmf);
- current->journal_info = NULL;
+ ceph_del_rw_context(fi, &rw_ctx);
} else
ret = -EAGAIN;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index a14b2c974c9e..6582c4507e6c 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -154,13 +154,19 @@ void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
spin_unlock(&mdsc->caps_list_lock);
}
-void ceph_reserve_caps(struct ceph_mds_client *mdsc,
+/*
+ * Called under mdsc->mutex.
+ */
+int ceph_reserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx, int need)
{
- int i;
+ int i, j;
struct ceph_cap *cap;
int have;
int alloc = 0;
+ int max_caps;
+ bool trimmed = false;
+ struct ceph_mds_session *s;
LIST_HEAD(newcaps);
dout("reserve caps ctx=%p need=%d\n", ctx, need);
@@ -179,16 +185,37 @@ void ceph_reserve_caps(struct ceph_mds_client *mdsc,
spin_unlock(&mdsc->caps_list_lock);
for (i = have; i < need; i++) {
+retry:
cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
- if (!cap)
- break;
+ if (!cap) {
+ if (!trimmed) {
+ for (j = 0; j < mdsc->max_sessions; j++) {
+ s = __ceph_lookup_mds_session(mdsc, j);
+ if (!s)
+ continue;
+ mutex_unlock(&mdsc->mutex);
+
+ mutex_lock(&s->s_mutex);
+ max_caps = s->s_nr_caps - (need - i);
+ ceph_trim_caps(mdsc, s, max_caps);
+ mutex_unlock(&s->s_mutex);
+
+ ceph_put_mds_session(s);
+ mutex_lock(&mdsc->mutex);
+ }
+ trimmed = true;
+ goto retry;
+ } else {
+ pr_warn("reserve caps ctx=%p ENOMEM "
+ "need=%d got=%d\n",
+ ctx, need, have + alloc);
+ goto out_nomem;
+ }
+ }
list_add(&cap->caps_item, &newcaps);
alloc++;
}
- /* we didn't manage to reserve as much as we needed */
- if (have + alloc != need)
- pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
- ctx, need, have + alloc);
+ BUG_ON(have + alloc != need);
spin_lock(&mdsc->caps_list_lock);
mdsc->caps_total_count += alloc;
@@ -204,6 +231,24 @@ void ceph_reserve_caps(struct ceph_mds_client *mdsc,
dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
ctx, mdsc->caps_total_count, mdsc->caps_use_count,
mdsc->caps_reserve_count, mdsc->caps_avail_count);
+ return 0;
+
+out_nomem:
+ while (!list_empty(&newcaps)) {
+ cap = list_first_entry(&newcaps,
+ struct ceph_cap, caps_item);
+ list_del(&cap->caps_item);
+ kmem_cache_free(ceph_cap_cachep, cap);
+ }
+
+ spin_lock(&mdsc->caps_list_lock);
+ mdsc->caps_avail_count += have;
+ mdsc->caps_reserve_count -= have;
+ BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
+ mdsc->caps_reserve_count +
+ mdsc->caps_avail_count);
+ spin_unlock(&mdsc->caps_list_lock);
+ return -ENOMEM;
}
int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
@@ -498,7 +543,7 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
*/
if ((issued & CEPH_CAP_FILE_SHARED) != (had & CEPH_CAP_FILE_SHARED)) {
if (issued & CEPH_CAP_FILE_SHARED)
- ci->i_shared_gen++;
+ atomic_inc(&ci->i_shared_gen);
if (S_ISDIR(ci->vfs_inode.i_mode)) {
dout(" marking %p NOT complete\n", &ci->vfs_inode);
__ceph_dir_clear_complete(ci);
@@ -577,18 +622,30 @@ void ceph_add_cap(struct inode *inode,
}
}
- if (!ci->i_snap_realm) {
+ if (!ci->i_snap_realm ||
+ ((flags & CEPH_CAP_FLAG_AUTH) &&
+ realmino != (u64)-1 && ci->i_snap_realm->ino != realmino)) {
/*
* add this inode to the appropriate snap realm
*/
struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
realmino);
if (realm) {
+ struct ceph_snap_realm *oldrealm = ci->i_snap_realm;
+ if (oldrealm) {
+ spin_lock(&oldrealm->inodes_with_caps_lock);
+ list_del_init(&ci->i_snap_realm_item);
+ spin_unlock(&oldrealm->inodes_with_caps_lock);
+ }
+
spin_lock(&realm->inodes_with_caps_lock);
ci->i_snap_realm = realm;
list_add(&ci->i_snap_realm_item,
&realm->inodes_with_caps);
spin_unlock(&realm->inodes_with_caps_lock);
+
+ if (oldrealm)
+ ceph_put_snap_realm(mdsc, oldrealm);
} else {
pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
realmino);
@@ -890,6 +947,11 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check)
/*
* called under i_ceph_lock
*/
+static int __ceph_is_single_caps(struct ceph_inode_info *ci)
+{
+ return rb_first(&ci->i_caps) == rb_last(&ci->i_caps);
+}
+
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
return !RB_EMPTY_ROOT(&ci->i_caps);
@@ -1703,21 +1765,24 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
int mds = -1; /* keep track of how far we've gone through i_caps list
to avoid an infinite loop on retry */
struct rb_node *p;
- int delayed = 0, sent = 0, num;
- bool is_delayed = flags & CHECK_CAPS_NODELAY;
+ int delayed = 0, sent = 0;
+ bool no_delay = flags & CHECK_CAPS_NODELAY;
bool queue_invalidate = false;
- bool force_requeue = false;
bool tried_invalidate = false;
/* if we are unmounting, flush any unused caps immediately. */
if (mdsc->stopping)
- is_delayed = true;
+ no_delay = true;
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_FLUSH)
flags |= CHECK_CAPS_FLUSH;
+ if (!(flags & CHECK_CAPS_AUTHONLY) ||
+ (ci->i_auth_cap && __ceph_is_single_caps(ci)))
+ __cap_delay_cancel(mdsc, ci);
+
goto retry_locked;
retry:
spin_lock(&ci->i_ceph_lock);
@@ -1772,7 +1837,7 @@ retry_locked:
* have cached pages, but don't want them, then try to invalidate.
* If we fail, it's because pages are locked.... try again later.
*/
- if ((!is_delayed || mdsc->stopping) &&
+ if ((!no_delay || mdsc->stopping) &&
!S_ISDIR(inode->i_mode) && /* ignore readdir cache */
!(ci->i_wb_ref || ci->i_wrbuffer_ref) && /* no dirty pages... */
inode->i_data.nrpages && /* have cached pages */
@@ -1781,27 +1846,16 @@ retry_locked:
!tried_invalidate) {
dout("check_caps trying to invalidate on %p\n", inode);
if (try_nonblocking_invalidate(inode) < 0) {
- if (revoking & (CEPH_CAP_FILE_CACHE|
- CEPH_CAP_FILE_LAZYIO)) {
- dout("check_caps queuing invalidate\n");
- queue_invalidate = true;
- ci->i_rdcache_revoking = ci->i_rdcache_gen;
- } else {
- dout("check_caps failed to invalidate pages\n");
- /* we failed to invalidate pages. check these
- caps again later. */
- force_requeue = true;
- __cap_set_timeouts(mdsc, ci);
- }
+ dout("check_caps queuing invalidate\n");
+ queue_invalidate = true;
+ ci->i_rdcache_revoking = ci->i_rdcache_gen;
}
tried_invalidate = true;
goto retry_locked;
}
- num = 0;
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
cap = rb_entry(p, struct ceph_cap, ci_node);
- num++;
/* avoid looping forever */
if (mds >= cap->mds ||
@@ -1864,7 +1918,7 @@ retry_locked:
cap->mds_wanted == want)
continue; /* nope, all good */
- if (is_delayed)
+ if (no_delay)
goto ack;
/* delay? */
@@ -1955,15 +2009,8 @@ ack:
goto retry; /* retake i_ceph_lock and restart our cap scan. */
}
- /*
- * Reschedule delayed caps release if we delayed anything,
- * otherwise cancel.
- */
- if (delayed && is_delayed)
- force_requeue = true; /* __send_cap delayed release; requeue */
- if (!delayed && !is_delayed)
- __cap_delay_cancel(mdsc, ci);
- else if (!is_delayed || force_requeue)
+ /* Reschedule delayed caps release if we delayed anything */
+ if (delayed)
__cap_delay_requeue(mdsc, ci);
spin_unlock(&ci->i_ceph_lock);
@@ -2160,7 +2207,7 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
u64 flush_tid;
int err = 0;
int dirty;
- int wait = wbc->sync_mode == WB_SYNC_ALL;
+ int wait = (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync);
dout("write_inode %p wait=%d\n", inode, wait);
if (wait) {
@@ -3426,7 +3473,14 @@ retry:
*/
issued = cap->issued;
- WARN_ON(issued != cap->implemented);
+ if (issued != cap->implemented)
+ pr_err_ratelimited("handle_cap_export: issued != implemented: "
+ "ino (%llx.%llx) mds%d seq %d mseq %d "
+ "issued %s implemented %s\n",
+ ceph_vinop(inode), mds, cap->seq, cap->mseq,
+ ceph_cap_string(issued),
+ ceph_cap_string(cap->implemented));
+
tcap = __get_cap_for_mds(ci, target);
if (tcap) {
@@ -3572,12 +3626,13 @@ retry:
if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
(ocap->seq != le32_to_cpu(ph->seq) ||
ocap->mseq != le32_to_cpu(ph->mseq))) {
- pr_err("handle_cap_import: mismatched seq/mseq: "
- "ino (%llx.%llx) mds%d seq %d mseq %d "
- "importer mds%d has peer seq %d mseq %d\n",
- ceph_vinop(inode), peer, ocap->seq,
- ocap->mseq, mds, le32_to_cpu(ph->seq),
- le32_to_cpu(ph->mseq));
+ pr_err_ratelimited("handle_cap_import: "
+ "mismatched seq/mseq: ino (%llx.%llx) "
+ "mds%d seq %d mseq %d importer mds%d "
+ "has peer seq %d mseq %d\n",
+ ceph_vinop(inode), peer, ocap->seq,
+ ocap->mseq, mds, le32_to_cpu(ph->seq),
+ le32_to_cpu(ph->mseq));
}
__ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
}
@@ -3939,11 +3994,20 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
cap = __get_cap_for_mds(ci, mds);
if (cap && __cap_is_valid(cap)) {
- if (force ||
- ((cap->issued & drop) &&
- (cap->issued & unless) == 0)) {
- if ((cap->issued & drop) &&
- (cap->issued & unless) == 0) {
+ unless &= cap->issued;
+ if (unless) {
+ if (unless & CEPH_CAP_AUTH_EXCL)
+ drop &= ~CEPH_CAP_AUTH_SHARED;
+ if (unless & CEPH_CAP_LINK_EXCL)
+ drop &= ~CEPH_CAP_LINK_SHARED;
+ if (unless & CEPH_CAP_XATTR_EXCL)
+ drop &= ~CEPH_CAP_XATTR_SHARED;
+ if (unless & CEPH_CAP_FILE_EXCL)
+ drop &= ~CEPH_CAP_FILE_SHARED;
+ }
+
+ if (force || (cap->issued & drop)) {
+ if (cap->issued & drop) {
int wanted = __ceph_caps_wanted(ci);
if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0)
wanted |= cap->mds_wanted;
@@ -3975,7 +4039,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
*p += sizeof(*rel);
ret = 1;
} else {
- dout("encode_inode_release %p cap %p %s\n",
+ dout("encode_inode_release %p cap %p %s (noop)\n",
inode, cap, ceph_cap_string(cap->issued));
}
}
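
__ceph_is_single_caps() relies on an rbtree property: rb_first() and rb_last() return the same node exactly when the tree holds at most one entry (both are NULL for an empty tree, which the caller in ceph_check_caps() excludes separately by also testing i_auth_cap). The test in isolation:

#include <linux/rbtree.h>

/* True for an rbtree with zero or one nodes; pair it with a non-empty
 * check when "exactly one" is required. */
static bool example_at_most_one(struct rb_root *root)
{
	return rb_first(root) == rb_last(root);
}
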
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 8a5266699b67..0c4346806e17 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -173,7 +173,7 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
* the MDS if/when the directory is modified).
*/
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
- u32 shared_gen)
+ int shared_gen)
{
struct ceph_file_info *fi = file->private_data;
struct dentry *parent = file->f_path.dentry;
@@ -184,7 +184,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
u64 idx = 0;
int err = 0;
- dout("__dcache_readdir %p v%u at %llx\n", dir, shared_gen, ctx->pos);
+ dout("__dcache_readdir %p v%u at %llx\n", dir, (unsigned)shared_gen, ctx->pos);
/* search start position */
if (ctx->pos > 2) {
@@ -231,11 +231,17 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
goto out;
}
- di = ceph_dentry(dentry);
spin_lock(&dentry->d_lock);
- if (di->lease_shared_gen == shared_gen &&
- d_really_is_positive(dentry) &&
- fpos_cmp(ctx->pos, di->offset) <= 0) {
+ di = ceph_dentry(dentry);
+ if (d_unhashed(dentry) ||
+ d_really_is_negative(dentry) ||
+ di->lease_shared_gen != shared_gen) {
+ spin_unlock(&dentry->d_lock);
+ dput(dentry);
+ err = -EAGAIN;
+ goto out;
+ }
+ if (fpos_cmp(ctx->pos, di->offset) <= 0) {
emit_dentry = true;
}
spin_unlock(&dentry->d_lock);
@@ -333,7 +339,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
ceph_snap(inode) != CEPH_SNAPDIR &&
__ceph_dir_is_complete_ordered(ci) &&
__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
- u32 shared_gen = ci->i_shared_gen;
+ int shared_gen = atomic_read(&ci->i_shared_gen);
spin_unlock(&ci->i_ceph_lock);
err = __dcache_readdir(file, ctx, shared_gen);
if (err != -EAGAIN)
@@ -381,6 +387,7 @@ more:
if (op == CEPH_MDS_OP_READDIR) {
req->r_direct_hash = ceph_frag_value(frag);
__set_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
+ req->r_inode_drop = CEPH_CAP_FILE_EXCL;
}
if (fi->last_name) {
req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
@@ -750,7 +757,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
spin_unlock(&ci->i_ceph_lock);
dout(" dir %p complete, -ENOENT\n", dir);
d_add(dentry, NULL);
- di->lease_shared_gen = ci->i_shared_gen;
+ di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
return NULL;
}
spin_unlock(&ci->i_ceph_lock);
@@ -835,7 +842,7 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry,
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_args.mknod.mode = cpu_to_le32(mode);
req->r_args.mknod.rdev = cpu_to_le32(rdev);
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
if (acls.pagelist) {
req->r_pagelist = acls.pagelist;
@@ -887,7 +894,7 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
err = ceph_mdsc_do_request(mdsc, dir, req);
if (!err && !req->r_reply_info.head->is_dentry)
@@ -936,7 +943,7 @@ static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
req->r_parent = dir;
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_args.mkdir.mode = cpu_to_le32(mode);
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
if (acls.pagelist) {
req->r_pagelist = acls.pagelist;
@@ -983,7 +990,7 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
/* release LINK_SHARED on source inode (mds will lock it) */
- req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
+ req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
err = ceph_mdsc_do_request(mdsc, dir, req);
if (err) {
d_drop(dentry);
@@ -1096,7 +1103,7 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
/* release LINK_RDCACHE on source inode (mds will lock it) */
- req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
+ req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
if (d_really_is_positive(new_dentry))
req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
err = ceph_mdsc_do_request(mdsc, old_dir, req);
@@ -1106,16 +1113,7 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
* do_request, above). If there is no trace, we need
* to do it here.
*/
-
- /* d_move screws up sibling dentries' offsets */
- ceph_dir_clear_complete(old_dir);
- ceph_dir_clear_complete(new_dir);
-
d_move(old_dentry, new_dentry);
-
- /* ensure target dentry is invalidated, despite
- rehashing bug in vfs_rename_dir */
- ceph_invalidate_dentry_lease(new_dentry);
}
ceph_mdsc_put_request(req);
return err;
@@ -1199,12 +1197,12 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
int valid = 0;
spin_lock(&ci->i_ceph_lock);
- if (ci->i_shared_gen == di->lease_shared_gen)
+ if (atomic_read(&ci->i_shared_gen) == di->lease_shared_gen)
valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
spin_unlock(&ci->i_ceph_lock);
dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
- dir, (unsigned)ci->i_shared_gen, dentry,
- (unsigned)di->lease_shared_gen, valid);
+ dir, (unsigned)atomic_read(&ci->i_shared_gen),
+ dentry, (unsigned)di->lease_shared_gen, valid);
return valid;
}
@@ -1332,24 +1330,37 @@ static void ceph_d_release(struct dentry *dentry)
*/
static void ceph_d_prune(struct dentry *dentry)
{
- dout("ceph_d_prune %p\n", dentry);
+ struct ceph_inode_info *dir_ci;
+ struct ceph_dentry_info *di;
+
+ dout("ceph_d_prune %pd %p\n", dentry, dentry);
/* do we have a valid parent? */
if (IS_ROOT(dentry))
return;
- /* if we are not hashed, we don't affect dir's completeness */
- if (d_unhashed(dentry))
+ /* we hold d_lock, so d_parent is stable */
+ dir_ci = ceph_inode(d_inode(dentry->d_parent));
+ if (dir_ci->i_vino.snap == CEPH_SNAPDIR)
return;
- if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
+	/* whoever calls d_delete() should also disable dcache readdir */
+ if (d_really_is_negative(dentry))
return;
- /*
- * we hold d_lock, so d_parent is stable, and d_fsdata is never
- * cleared until d_release
- */
- ceph_dir_clear_complete(d_inode(dentry->d_parent));
+ /* d_fsdata does not get cleared until d_release */
+ if (!d_unhashed(dentry)) {
+ __ceph_dir_clear_complete(dir_ci);
+ return;
+ }
+
+	/* Disable dcache readdir in case someone called d_drop() or
+	 * d_invalidate() but the MDS did not properly revoke
+	 * CEPH_CAP_FILE_SHARED (dcache readdir would still be enabled) */
+ di = ceph_dentry(dentry);
+ if (di->offset > 0 &&
+ di->lease_shared_gen == atomic_read(&dir_ci->i_shared_gen))
+ __ceph_dir_clear_ordered(dir_ci);
}
/*
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 5c17125f45c7..6639926eed4e 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -181,6 +181,10 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
return -ENOMEM;
}
cf->fmode = fmode;
+
+ spin_lock_init(&cf->rw_contexts_lock);
+ INIT_LIST_HEAD(&cf->rw_contexts);
+
cf->next_offset = 2;
cf->readdir_cache_idx = -1;
file->private_data = cf;
@@ -396,7 +400,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
if (flags & O_CREAT) {
- req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
if (acls.pagelist) {
req->r_pagelist = acls.pagelist;
@@ -464,6 +468,7 @@ int ceph_release(struct inode *inode, struct file *file)
ceph_mdsc_put_request(cf->last_readdir);
kfree(cf->last_name);
kfree(cf->dir_info);
+ WARN_ON(!list_empty(&cf->rw_contexts));
kmem_cache_free(ceph_file_cachep, cf);
/* wake up anyone waiting for caps on this inode */
@@ -1199,12 +1204,13 @@ again:
retry_op = READ_INLINE;
}
} else {
+ CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
ceph_cap_string(got));
- current->journal_info = filp;
+ ceph_add_rw_context(fi, &rw_ctx);
ret = generic_file_read_iter(iocb, to);
- current->journal_info = NULL;
+ ceph_del_rw_context(fi, &rw_ctx);
}
dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ab81652198c4..c6ec5aa46100 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -494,7 +494,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ci->i_wrbuffer_ref = 0;
ci->i_wrbuffer_ref_head = 0;
atomic_set(&ci->i_filelock_ref, 0);
- ci->i_shared_gen = 0;
+ atomic_set(&ci->i_shared_gen, 0);
ci->i_rdcache_gen = 0;
ci->i_rdcache_revoking = 0;
@@ -1041,7 +1041,7 @@ static void update_dentry_lease(struct dentry *dentry,
if (ceph_snap(dir) != CEPH_NOSNAP)
goto out_unlock;
- di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
+ di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
if (duration == 0)
goto out_unlock;
@@ -1080,6 +1080,27 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
BUG_ON(d_inode(dn));
+ if (S_ISDIR(in->i_mode)) {
+		/* If the inode is a directory, d_splice_alias() below will
+		 * remove 'realdn' from its original parent. We need to ensure
+		 * that the original parent's readdir cache will not reference
+		 * 'realdn'.
+		 */
+ realdn = d_find_any_alias(in);
+ if (realdn) {
+ struct ceph_dentry_info *di = ceph_dentry(realdn);
+ spin_lock(&realdn->d_lock);
+
+ realdn->d_op->d_prune(realdn);
+
+ di->time = jiffies;
+ di->lease_shared_gen = 0;
+ di->offset = 0;
+
+ spin_unlock(&realdn->d_lock);
+ dput(realdn);
+ }
+ }
+
/* dn must be unhashed */
if (!d_unhashed(dn))
d_drop(dn);
@@ -1295,8 +1316,8 @@ retry_lookup:
if (!rinfo->head->is_target) {
dout("fill_trace null dentry\n");
if (d_really_is_positive(dn)) {
- ceph_dir_clear_ordered(dir);
dout("d_delete %p\n", dn);
+ ceph_dir_clear_ordered(dir);
d_delete(dn);
} else if (have_lease) {
if (d_unhashed(dn))
@@ -1323,7 +1344,6 @@ retry_lookup:
dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
dn, d_inode(dn), ceph_vinop(d_inode(dn)),
ceph_vinop(in));
- ceph_dir_clear_ordered(dir);
d_invalidate(dn);
have_lease = false;
}
@@ -1573,9 +1593,19 @@ retry_lookup:
} else if (d_really_is_positive(dn) &&
(ceph_ino(d_inode(dn)) != tvino.ino ||
ceph_snap(d_inode(dn)) != tvino.snap)) {
+ struct ceph_dentry_info *di = ceph_dentry(dn);
dout(" dn %p points to wrong inode %p\n",
dn, d_inode(dn));
- __ceph_dir_clear_ordered(ci);
+
+ spin_lock(&dn->d_lock);
+ if (di->offset > 0 &&
+ di->lease_shared_gen ==
+ atomic_read(&ci->i_shared_gen)) {
+ __ceph_dir_clear_ordered(ci);
+ di->offset = 0;
+ }
+ spin_unlock(&dn->d_lock);
+
d_delete(dn);
dput(dn);
goto retry_lookup;
@@ -1600,9 +1630,7 @@ retry_lookup:
&req->r_caps_reservation);
if (ret < 0) {
pr_err("fill_inode badness on %p\n", in);
- if (d_really_is_positive(dn))
- __ceph_dir_clear_ordered(ci);
- else
+ if (d_really_is_negative(dn))
iput(in);
d_drop(dn);
err = ret;
@@ -2000,8 +2028,8 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
ceph_encode_timespec(&req->r_args.setattr.atime,
&attr->ia_atime);
mask |= CEPH_SETATTR_ATIME;
- release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
- CEPH_CAP_FILE_WR;
+ release |= CEPH_CAP_FILE_SHARED |
+ CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
}
}
if (ia_valid & ATTR_MTIME) {
@@ -2022,8 +2050,8 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
ceph_encode_timespec(&req->r_args.setattr.mtime,
&attr->ia_mtime);
mask |= CEPH_SETATTR_MTIME;
- release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
- CEPH_CAP_FILE_WR;
+ release |= CEPH_CAP_FILE_SHARED |
+ CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
}
}
if (ia_valid & ATTR_SIZE) {
@@ -2041,8 +2069,8 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
req->r_args.setattr.old_size =
cpu_to_le64(inode->i_size);
mask |= CEPH_SETATTR_SIZE;
- release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
- CEPH_CAP_FILE_WR;
+ release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
+ CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
}
}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 1b468250e947..2e8f90f96540 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -604,10 +604,20 @@ static void __register_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req,
struct inode *dir)
{
+ int ret = 0;
+
req->r_tid = ++mdsc->last_tid;
- if (req->r_num_caps)
- ceph_reserve_caps(mdsc, &req->r_caps_reservation,
- req->r_num_caps);
+ if (req->r_num_caps) {
+ ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
+ req->r_num_caps);
+ if (ret < 0) {
+ pr_err("__register_request %p "
+ "failed to reserve caps: %d\n", req, ret);
+ /* set req->r_err to fail early from __do_request */
+ req->r_err = ret;
+ return;
+ }
+ }
dout("__register_request %p tid %lld\n", req, req->r_tid);
ceph_mdsc_get_request(req);
insert_request(&mdsc->request_tree, req);
@@ -1545,9 +1555,9 @@ out:
/*
* Trim session cap count down to some max number.
*/
-static int trim_caps(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session,
- int max_caps)
+int ceph_trim_caps(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ int max_caps)
{
int trim_caps = session->s_nr_caps - max_caps;
@@ -2438,11 +2448,14 @@ out:
*/
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
- struct inode *inode = req->r_parent;
+ struct inode *dir = req->r_parent;
+ struct inode *old_dir = req->r_old_dentry_dir;
- dout("invalidate_dir_request %p (complete, lease(s))\n", inode);
+ dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
- ceph_dir_clear_complete(inode);
+ ceph_dir_clear_complete(dir);
+ if (old_dir)
+ ceph_dir_clear_complete(old_dir);
if (req->r_dentry)
ceph_invalidate_dentry_lease(req->r_dentry);
if (req->r_old_dentry)
@@ -2773,7 +2786,7 @@ static void handle_session(struct ceph_mds_session *session,
break;
case CEPH_SESSION_RECALL_STATE:
- trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
+ ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
break;
case CEPH_SESSION_FLUSHMSG:
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 837ac4b087a0..71e3b783ee6f 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -444,4 +444,7 @@ ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target);
extern void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session);
+extern int ceph_trim_caps(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ int max_caps);
#endif
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 8a2ca41e4b97..07cf95e6413d 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -922,13 +922,17 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
/*
* Move the inode to the new realm
*/
- spin_lock(&realm->inodes_with_caps_lock);
+ oldrealm = ci->i_snap_realm;
+ spin_lock(&oldrealm->inodes_with_caps_lock);
list_del_init(&ci->i_snap_realm_item);
+ spin_unlock(&oldrealm->inodes_with_caps_lock);
+
+ spin_lock(&realm->inodes_with_caps_lock);
list_add(&ci->i_snap_realm_item,
&realm->inodes_with_caps);
- oldrealm = ci->i_snap_realm;
ci->i_snap_realm = realm;
spin_unlock(&realm->inodes_with_caps_lock);
+
spin_unlock(&ci->i_ceph_lock);
ceph_get_snap_realm(mdsc, realm);
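
The snap.c hunk above splits the realm move into a list_del under the old realm's lock and a list_add under the new realm's lock; the old code unlinked the inode from oldrealm's list while holding only the destination realm's lock. The general pattern, as a kernel-style sketch with illustrative names:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Move an entry between two lists, each guarded by its own spinlock.
 * Take each lock only around the list it protects; never assume the
 * source and destination share a lock.
 */
static void move_item(struct list_head *item, spinlock_t *src_lock,
		      struct list_head *dst, spinlock_t *dst_lock)
{
	spin_lock(src_lock);
	list_del_init(item);
	spin_unlock(src_lock);

	spin_lock(dst_lock);
	list_add(item, dst);
	spin_unlock(dst_lock);
}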
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 2beeec07fa76..21b2e5b004eb 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -256,7 +256,8 @@ struct ceph_inode_xattr {
*/
struct ceph_dentry_info {
struct ceph_mds_session *lease_session;
- u32 lease_gen, lease_shared_gen;
+ int lease_shared_gen;
+ u32 lease_gen;
u32 lease_seq;
unsigned long lease_renew_after, lease_renew_from;
struct list_head lru;
@@ -353,7 +354,7 @@ struct ceph_inode_info {
int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref;
int i_wrbuffer_ref, i_wrbuffer_ref_head;
atomic_t i_filelock_ref;
- u32 i_shared_gen; /* increment each time we get FILE_SHARED */
+ atomic_t i_shared_gen; /* increment each time we get FILE_SHARED */
u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */
u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */
@@ -648,7 +649,7 @@ extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check);
extern void ceph_caps_init(struct ceph_mds_client *mdsc);
extern void ceph_caps_finalize(struct ceph_mds_client *mdsc);
extern void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta);
-extern void ceph_reserve_caps(struct ceph_mds_client *mdsc,
+extern int ceph_reserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx, int need);
extern int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
struct ceph_cap_reservation *ctx);
@@ -668,6 +669,9 @@ struct ceph_file_info {
short fmode; /* initialized on open */
short flags; /* CEPH_F_* */
+ spinlock_t rw_contexts_lock;
+ struct list_head rw_contexts;
+
/* readdir: position within the dir */
u32 frag;
struct ceph_mds_request *last_readdir;
@@ -684,6 +688,49 @@ struct ceph_file_info {
int dir_info_len;
};
+struct ceph_rw_context {
+ struct list_head list;
+ struct task_struct *thread;
+ int caps;
+};
+
+#define CEPH_DEFINE_RW_CONTEXT(_name, _caps) \
+ struct ceph_rw_context _name = { \
+ .thread = current, \
+ .caps = _caps, \
+ }
+
+static inline void ceph_add_rw_context(struct ceph_file_info *cf,
+ struct ceph_rw_context *ctx)
+{
+ spin_lock(&cf->rw_contexts_lock);
+ list_add(&ctx->list, &cf->rw_contexts);
+ spin_unlock(&cf->rw_contexts_lock);
+}
+
+static inline void ceph_del_rw_context(struct ceph_file_info *cf,
+ struct ceph_rw_context *ctx)
+{
+ spin_lock(&cf->rw_contexts_lock);
+ list_del(&ctx->list);
+ spin_unlock(&cf->rw_contexts_lock);
+}
+
+static inline struct ceph_rw_context*
+ceph_find_rw_context(struct ceph_file_info *cf)
+{
+ struct ceph_rw_context *ctx, *found = NULL;
+ spin_lock(&cf->rw_contexts_lock);
+ list_for_each_entry(ctx, &cf->rw_contexts, list) {
+ if (ctx->thread == current) {
+ found = ctx;
+ break;
+ }
+ }
+ spin_unlock(&cf->rw_contexts_lock);
+ return found;
+}
+
struct ceph_readdir_cache_control {
struct page *page;
struct dentry **dentries;
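
The rw_contexts machinery above replaces the earlier abuse of current->journal_info (see the fs/ceph/file.c hunk): a nested path running in the same thread can now discover which caps the outer read already pinned. A sketch of such a consumer, building on the helpers just added; the function and its use in a page-fault path are assumptions here, not part of this patch:

/* Does the current thread already hold the needed caps on this file? */
static int sketch_thread_has_caps(struct ceph_file_info *cf, int need)
{
	struct ceph_rw_context *rw_ctx = ceph_find_rw_context(cf);

	/* Non-NULL means current is inside a read that pinned caps;
	 * taking them again from here could deadlock. */
	return rw_ctx && (rw_ctx->caps & need) == need;
}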
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index c7a863219fa3..e35e711db68e 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -128,6 +128,10 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
seq_puts(m, " type: CDROM ");
else
seq_printf(m, " type: %d ", dev_type);
+ if (tcon->seal)
+ seq_printf(m, " Encrypted");
+ if (tcon->unix_ext)
+ seq_printf(m, " POSIX Extensions");
if (tcon->ses->server->ops->dump_share_caps)
tcon->ses->server->ops->dump_share_caps(m, tcon);
@@ -246,7 +250,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
atomic_read(&server->smbd_conn->mr_used_count));
skip_rdma:
#endif
- seq_printf(m, "\nNumber of credits: %d", server->credits);
+ seq_printf(m, "\nNumber of credits: %d Dialect 0x%x",
+ server->credits, server->dialect);
+ if (server->sign)
+ seq_printf(m, " signed");
i++;
list_for_each(tmp2, &server->smb_ses_list) {
ses = list_entry(tmp2, struct cifs_ses,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4e0922d24eb2..9ceebf30eb22 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -6343,9 +6343,7 @@ SetEARetry:
pSMB->InformationLevel =
cpu_to_le16(SMB_SET_FILE_EA);
- parm_data =
- (struct fealist *) (((char *) &pSMB->hdr.Protocol) +
- offset);
+ parm_data = (void *)pSMB + offsetof(struct smb_hdr, Protocol) + offset;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->SetupCount = 1;
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 6eb9f9691ed4..2a2b34ccaf49 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -192,6 +192,35 @@ struct smb2_symlink_err_rsp {
__u8 PathBuffer[0];
} __packed;
+/* SMB 3.1.1 and later dialects. See MS-SMB2 section 2.2.2.1 */
+struct smb2_error_context_rsp {
+ __le32 ErrorDataLength;
+ __le32 ErrorId;
+ __u8 ErrorContextData; /* ErrorDataLength long array */
+} __packed;
+
+/* Defines for Type field below (see MS-SMB2 2.2.2.2.2.1) */
+#define MOVE_DST_IPADDR_V4 cpu_to_le32(0x00000001)
+#define MOVE_DST_IPADDR_V6 cpu_to_le32(0x00000002)
+
+struct move_dst_ipaddr {
+ __le32 Type;
+ __u32 Reserved;
+ __u8 address[16]; /* IPv4 followed by 12 bytes rsvd or IPv6 address */
+} __packed;
+
+struct share_redirect_error_context_rsp {
+ __le32 StructureSize;
+ __le32 NotificationType;
+ __le32 ResourceNameOffset;
+ __le32 ResourceNameLength;
+ __le16 Flags;
+ __le16 TargetType;
+ __le32 IPAddrCount;
+ struct move_dst_ipaddr IpAddrMoveList[0];
+ /* __u8 ResourceName[] */ /* Name of share as counted Unicode string */
+} __packed;
+
#define SMB2_CLIENT_GUID_SIZE 16
struct smb2_negotiate_req {
@@ -320,7 +349,9 @@ struct smb2_logoff_rsp {
} __packed;
/* Flags/Reserved for SMB3.1.1 */
-#define SMB2_SHAREFLAG_CLUSTER_RECONNECT 0x0001
+#define SMB2_TREE_CONNECT_FLAG_CLUSTER_RECONNECT cpu_to_le16(0x0001)
+#define SMB2_TREE_CONNECT_FLAG_REDIRECT_TO_OWNER cpu_to_le16(0x0002)
+#define SMB2_TREE_CONNECT_FLAG_EXTENSION_PRESENT cpu_to_le16(0x0004)
struct smb2_tree_connect_req {
struct smb2_sync_hdr sync_hdr;
@@ -331,6 +362,82 @@ struct smb2_tree_connect_req {
__u8 Buffer[1]; /* variable length */
} __packed;
+/* See MS-SMB2 section 2.2.9.2 */
+/* Context Types */
+#define SMB2_RESERVED_TREE_CONNECT_CONTEXT_ID 0x0000
+#define SMB2_REMOTED_IDENTITY_TREE_CONNECT_CONTEXT_ID cpu_to_le16(0x0001)
+
+struct tree_connect_contexts {
+ __le16 ContextType;
+ __le16 DataLength;
+ __le32 Reserved;
+ __u8 Data[0];
+} __packed;
+
+/* Remoted identity tree connect context structures - see MS-SMB2 2.2.9.2.1 */
+struct smb3_blob_data {
+ __le16 BlobSize;
+ __u8 BlobData[0];
+} __packed;
+
+/* Valid values for Attr */
+#define SE_GROUP_MANDATORY 0x00000001
+#define SE_GROUP_ENABLED_BY_DEFAULT 0x00000002
+#define SE_GROUP_ENABLED 0x00000004
+#define SE_GROUP_OWNER 0x00000008
+#define SE_GROUP_USE_FOR_DENY_ONLY 0x00000010
+#define SE_GROUP_INTEGRITY 0x00000020
+#define SE_GROUP_INTEGRITY_ENABLED 0x00000040
+#define SE_GROUP_RESOURCE 0x20000000
+#define SE_GROUP_LOGON_ID 0xC0000000
+
+/* struct sid_attr_data is SidData array in BlobData format then le32 Attr */
+
+struct sid_array_data {
+ __le16 SidAttrCount;
+ /* SidAttrList - array of sid_attr_data structs */
+} __packed;
+
+struct luid_attr_data {
+	__le64 Luid;
+	__le32 Attr; /* see MS-SMB2 2.2.9.2.1.4 LUID_ATTR_DATA */
+} __packed;
+
+/*
+ * struct privilege_data is the same as BLOB_DATA - see MS-SMB2 2.2.9.2.1.5
+ * but with size of the LUID_ATTR_DATA struct and BlobData set to LUID_ATTR_DATA
+ */
+
+struct privilege_array_data {
+ __le16 PrivilegeCount;
+ /* array of privilege_data structs */
+} __packed;
+
+struct remoted_identity_tcon_context {
+ __le16 TicketType; /* must be 0x0001 */
+ __le16 TicketSize; /* total size of this struct */
+ __le16 User; /* offset to SID_ATTR_DATA struct with user info */
+ __le16 UserName; /* offset to null terminated Unicode username string */
+ __le16 Domain; /* offset to null terminated Unicode domain name */
+ __le16 Groups; /* offset to SID_ARRAY_DATA struct with group info */
+ __le16 RestrictedGroups; /* similar to above */
+ __le16 Privileges; /* offset to PRIVILEGE_ARRAY_DATA struct */
+ __le16 PrimaryGroup; /* offset to SID_ARRAY_DATA struct */
+ __le16 Owner; /* offset to BLOB_DATA struct */
+ __le16 DefaultDacl; /* offset to BLOB_DATA struct */
+ __le16 DeviceGroups; /* offset to SID_ARRAY_DATA struct */
+ __le16 UserClaims; /* offset to BLOB_DATA struct */
+ __le16 DeviceClaims; /* offset to BLOB_DATA struct */
+ __u8 TicketInfo[0]; /* variable length buf - remoted identity data */
+} __packed;
+
+struct smb2_tree_connect_req_extension {
+ __le32 TreeConnectContextOffset;
+ __le16 TreeConnectContextCount;
+ __u8 Reserved[10];
+ __u8 PathName[0]; /* variable sized array */
+ /* followed by array of TreeConnectContexts */
+} __packed;
+
struct smb2_tree_connect_rsp {
struct smb2_hdr hdr;
__le16 StructureSize; /* Must be 16 */
@@ -365,7 +472,8 @@ struct smb2_tree_connect_rsp {
#define SHI1005_FLAGS_ENABLE_HASH_V1 0x00002000
#define SHI1005_FLAGS_ENABLE_HASH_V2 0x00004000
#define SHI1005_FLAGS_ENCRYPT_DATA 0x00008000
-#define SHI1005_FLAGS_ALL 0x0000FF33
+#define SMB2_SHAREFLAG_IDENTITY_REMOTING 0x00040000 /* 3.1.1 */
+#define SHI1005_FLAGS_ALL 0x0004FF33
/* Possible share capabilities */
#define SMB2_SHARE_CAP_DFS cpu_to_le32(0x00000008) /* all dialects */
@@ -373,6 +481,7 @@ struct smb2_tree_connect_rsp {
#define SMB2_SHARE_CAP_SCALEOUT cpu_to_le32(0x00000020) /* 3.0 */
#define SMB2_SHARE_CAP_CLUSTER cpu_to_le32(0x00000040) /* 3.0 */
#define SMB2_SHARE_CAP_ASYMMETRIC cpu_to_le32(0x00000080) /* 3.02 */
+#define SMB2_SHARE_CAP_REDIRECT_TO_OWNER cpu_to_le32(0x00000100) /* 3.1.1 */
struct smb2_tree_disconnect_req {
struct smb2_sync_hdr sync_hdr;
@@ -556,6 +665,7 @@ struct create_context {
#define SMB2_LEASE_WRITE_CACHING cpu_to_le32(0x04)
#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS cpu_to_le32(0x02)
+#define SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET cpu_to_le32(0x00000004)
#define SMB2_LEASE_KEY_SIZE 16
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 5130492847eb..91710eb571fb 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -217,9 +217,10 @@ static void smbd_destroy_rdma_work(struct work_struct *work)
spin_unlock_irqrestore(
&info->reassembly_queue_lock, flags);
put_receive_buffer(info, response);
- }
+ } else
+ spin_unlock_irqrestore(&info->reassembly_queue_lock, flags);
} while (response);
- spin_unlock_irqrestore(&info->reassembly_queue_lock, flags);
+
info->reassembly_data_length = 0;
log_rdma_event(INFO, "free receive buffers\n");
@@ -1934,15 +1935,16 @@ again:
* No need to lock if we are not at the
* end of the queue
*/
- if (!queue_length)
+ if (queue_length)
+ list_del(&response->list);
+ else {
spin_lock_irq(
&info->reassembly_queue_lock);
- list_del(&response->list);
- queue_removed++;
- if (!queue_length)
+ list_del(&response->list);
spin_unlock_irq(
&info->reassembly_queue_lock);
-
+ }
+ queue_removed++;
info->count_reassembly_queue--;
info->count_dequeue_reassembly_queue++;
put_receive_buffer(info, response);
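
Both smbdirect hunks above narrow the reassembly-queue locking to the single racy case: only the receive path appends to the queue, so the consumer needs the lock only while unlinking the tail entry, where an append may be racing. Condensed into a sketch (the struct and function are illustrative):

#include <linux/list.h>
#include <linux/spinlock.h>

struct reassembly_sketch {
	spinlock_t reassembly_queue_lock;
	struct list_head reassembly_queue;
};

static void dequeue_response(struct reassembly_sketch *info,
			     struct list_head *entry, int remaining)
{
	if (remaining) {
		list_del(entry);	/* not at the tail: no racing producer */
	} else {
		spin_lock_irq(&info->reassembly_queue_lock);
		list_del(entry);	/* tail entry: producer may be appending */
		spin_unlock_irq(&info->reassembly_queue_lock);
	}
}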
diff --git a/fs/cramfs/Kconfig b/fs/cramfs/Kconfig
index 58e2fe40b2a0..5933f995309a 100644
--- a/fs/cramfs/Kconfig
+++ b/fs/cramfs/Kconfig
@@ -33,8 +33,7 @@ config CRAMFS_BLOCKDEV
config CRAMFS_MTD
bool "Support CramFs image directly mapped in physical memory"
- depends on CRAMFS && MTD
- depends on CRAMFS=m || MTD=y
+ depends on CRAMFS && CRAMFS <= MTD
default y if !CRAMFS_BLOCKDEV
help
This option allows the CramFs driver to load data directly from
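
The new dependency relies on Kconfig's tristate ordering n < m < y: "CRAMFS <= MTD" means CRAMFS=y requires MTD=y, while CRAMFS=m accepts MTD=m or MTD=y, the same constraint the two deleted lines spelled out. Modeled with plain integers:

#include <assert.h>

enum tristate { n = 0, m = 1, y = 2 };	/* Kconfig ordering */

int main(void)
{
	assert(!(y <= m));	/* CRAMFS=y, MTD=m: rejected */
	assert(m <= y);		/* CRAMFS=m, MTD=y: allowed  */
	assert(m <= m);		/* CRAMFS=m, MTD=m: allowed  */
	return 0;
}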
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index c53d9cc5ae7a..a03ce3422578 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -275,7 +275,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
{
struct kernfs_open_file *of = kernfs_of(file);
const struct kernfs_ops *ops;
- size_t len;
+ ssize_t len;
char *buf;
if (of->atomic_write_len) {
diff --git a/fs/locks.c b/fs/locks.c
index 21b4dfa289ee..d6ff4beb70ce 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1554,9 +1554,9 @@ out:
EXPORT_SYMBOL(__break_lease);
/**
- * lease_get_mtime - get the last modified time of an inode
+ * lease_get_mtime - update modified time of an inode with exclusive lease
* @inode: the inode
- * @time: pointer to a timespec which will contain the last modified time
+ * @time: pointer to a timespec which contains the last modified time
*
* This is to force NFS clients to flush their caches for files with
* exclusive leases. The justification is that if someone has an
@@ -1580,8 +1580,6 @@ void lease_get_mtime(struct inode *inode, struct timespec *time)
if (has_lease)
*time = current_time(inode);
- else
- *time = inode->i_mtime;
}
EXPORT_SYMBOL(lease_get_mtime);
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 2758480555fa..1a70581e1cb2 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -251,6 +251,34 @@ encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
}
/*
+ * Fill in the pre_op attr for the wcc data
+ */
+void fill_pre_wcc(struct svc_fh *fhp)
+{
+ struct inode *inode;
+ struct kstat stat;
+ __be32 err;
+
+ if (fhp->fh_pre_saved)
+ return;
+
+ inode = d_inode(fhp->fh_dentry);
+ err = fh_getattr(fhp, &stat);
+ if (err) {
+ /* Grab the times from inode anyway */
+ stat.mtime = inode->i_mtime;
+ stat.ctime = inode->i_ctime;
+ stat.size = inode->i_size;
+ }
+
+ fhp->fh_pre_mtime = stat.mtime;
+ fhp->fh_pre_ctime = stat.ctime;
+ fhp->fh_pre_size = stat.size;
+ fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
+ fhp->fh_pre_saved = true;
+}
+
+/*
* Fill in the post_op attr for the wcc data
*/
void fill_post_wcc(struct svc_fh *fhp)
@@ -261,7 +289,8 @@ void fill_post_wcc(struct svc_fh *fhp)
printk("nfsd: inode locked twice during operation.\n");
err = fh_getattr(fhp, &fhp->fh_post_attr);
- fhp->fh_post_change = nfsd4_change_attribute(d_inode(fhp->fh_dentry));
+ fhp->fh_post_change = nfsd4_change_attribute(&fhp->fh_post_attr,
+ d_inode(fhp->fh_dentry));
if (err) {
fhp->fh_post_saved = false;
/* Grab the ctime anyway - set_change_info might use it */
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 008ea0b627d0..a0bed2b2004d 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1363,14 +1363,14 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
const struct nfsd4_layout_ops *ops;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
- int accmode;
+ int accmode = NFSD_MAY_READ_IF_EXEC;
switch (lgp->lg_seg.iomode) {
case IOMODE_READ:
- accmode = NFSD_MAY_READ;
+ accmode |= NFSD_MAY_READ;
break;
case IOMODE_RW:
- accmode = NFSD_MAY_READ | NFSD_MAY_WRITE;
+ accmode |= NFSD_MAY_READ | NFSD_MAY_WRITE;
break;
default:
dprintk("%s: invalid iomode %d\n",
@@ -1703,6 +1703,9 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
status = nfserr_minor_vers_mismatch;
if (nfsd_minorversion(args->minorversion, NFSD_TEST) <= 0)
goto out;
+ status = nfserr_resource;
+ if (args->opcnt > NFSD_MAX_OPS_PER_COMPOUND)
+ goto out;
status = nfs41_check_op_ordering(args);
if (status) {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b29b5a185a2c..150521c9671b 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3590,6 +3590,7 @@ nfsd4_verify_open_stid(struct nfs4_stid *s)
switch (s->sc_type) {
default:
break;
+ case 0:
case NFS4_CLOSED_STID:
case NFS4_CLOSED_DELEG_STID:
ret = nfserr_bad_stateid;
@@ -5182,7 +5183,6 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
lockowner(stp->st_stateowner)))
goto out;
- stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_lock_stateid(stp);
ret = nfs_ok;
@@ -6078,10 +6078,8 @@ out:
* If this is a new, never-before-used stateid, and we are
* returning an error, then just go ahead and release it.
*/
- if (status && new) {
- lock_stp->st_stid.sc_type = NFS4_CLOSED_STID;
+ if (status && new)
release_lock_stateid(lock_stp);
- }
mutex_unlock(&lock_stp->st_mutex);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 2c61c6b8ae09..e502fd16246b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -455,8 +455,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
}
label->len = 0;
-#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
- if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
+ if (IS_ENABLED(CONFIG_NFSD_V4_SECURITY_LABEL) &&
+ bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
READ_BUF(4);
len += 4;
dummy32 = be32_to_cpup(p++); /* lfs: we don't use it */
@@ -476,7 +476,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
if (!label->data)
return nfserr_jukebox;
}
-#endif
if (bmval[2] & FATTR4_WORD2_MODE_UMASK) {
if (!umask)
goto xdr_error;
@@ -1918,8 +1917,13 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
if (argp->taglen > NFSD4_MAX_TAGLEN)
goto xdr_error;
- if (argp->opcnt > 100)
- goto xdr_error;
+ /*
+ * NFS4ERR_RESOURCE is a more helpful error than GARBAGE_ARGS
+ * here, so we return success at the xdr level so that
+	 * nfsd4_proc can handle this as an NFS-level error.
+ */
+ if (argp->opcnt > NFSD_MAX_OPS_PER_COMPOUND)
+ return 0;
if (argp->opcnt > ARRAY_SIZE(argp->iops)) {
argp->ops = kzalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL);
@@ -1991,7 +1995,7 @@ static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
*p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time));
*p++ = 0;
} else if (IS_I_VERSION(inode)) {
- p = xdr_encode_hyper(p, nfsd4_change_attribute(inode));
+ p = xdr_encode_hyper(p, nfsd4_change_attribute(stat, inode));
} else {
*p++ = cpu_to_be32(stat->ctime.tv_sec);
*p++ = cpu_to_be32(stat->ctime.tv_nsec);
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index b8444189223b..755e256a9103 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -253,36 +253,20 @@ fh_clear_wcc(struct svc_fh *fhp)
* By using both ctime and the i_version counter we guarantee that as
* long as time doesn't go backwards we never reuse an old value.
*/
-static inline u64 nfsd4_change_attribute(struct inode *inode)
+static inline u64 nfsd4_change_attribute(struct kstat *stat,
+ struct inode *inode)
{
u64 chattr;
- chattr = inode->i_ctime.tv_sec;
+ chattr = stat->ctime.tv_sec;
chattr <<= 30;
- chattr += inode->i_ctime.tv_nsec;
+ chattr += stat->ctime.tv_nsec;
chattr += inode_query_iversion(inode);
return chattr;
}
-/*
- * Fill in the pre_op attr for the wcc data
- */
-static inline void
-fill_pre_wcc(struct svc_fh *fhp)
-{
- struct inode *inode;
-
- inode = d_inode(fhp->fh_dentry);
- if (!fhp->fh_pre_saved) {
- fhp->fh_pre_mtime = inode->i_mtime;
- fhp->fh_pre_ctime = inode->i_ctime;
- fhp->fh_pre_size = inode->i_size;
- fhp->fh_pre_change = nfsd4_change_attribute(inode);
- fhp->fh_pre_saved = true;
- }
-}
-
-extern void fill_post_wcc(struct svc_fh *);
+extern void fill_pre_wcc(struct svc_fh *fhp);
+extern void fill_post_wcc(struct svc_fh *fhp);
#else
#define fh_clear_wcc(ignored)
#define fill_pre_wcc(ignored)
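
nfsd4_change_attribute() packs ctime and the inode version counter into one u64: the seconds occupy the high bits, the nanoseconds (always below 2^30) fill the low 30 bits, and i_version is added on top, so the value still moves when i_version is bumped within a single ctime tick. A standalone sketch of the arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint64_t change_attr(int64_t sec, long nsec, uint64_t version)
{
	uint64_t chattr = (uint64_t)sec << 30;	/* seconds dominate */

	chattr += nsec;		/* tv_nsec < 10^9 < 2^30 */
	chattr += version;	/* distinguishes same-tick updates */
	return chattr;
}

int main(void)
{
	printf("%llu\n",
	       (unsigned long long)change_attr(1515151515, 123456789, 42));
	return 0;
}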
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 644a0342f0e0..79b6064f8977 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -188,6 +188,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
*p++ = htonl((u32) stat->ino);
*p++ = htonl((u32) stat->atime.tv_sec);
*p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0);
+ time = stat->mtime;
lease_get_mtime(d_inode(dentry), &time);
*p++ = htonl((u32) time.tv_sec);
*p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0);
diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c
index ae782df5c063..fe484cf93e5c 100644
--- a/fs/orangefs/dcache.c
+++ b/fs/orangefs/dcache.c
@@ -33,7 +33,7 @@ static int orangefs_revalidate_lookup(struct dentry *dentry)
new_op->upcall.req.lookup.parent_refn = parent->refn;
strncpy(new_op->upcall.req.lookup.d_name,
dentry->d_name.name,
- ORANGEFS_NAME_MAX);
+ ORANGEFS_NAME_MAX - 1);
gossip_debug(GOSSIP_DCACHE_DEBUG,
"%s:%s:%d interrupt flag [%d]\n",
@@ -118,8 +118,12 @@ static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)
return 0;
/* We do not need to continue with negative dentries. */
- if (!dentry->d_inode)
- goto out;
+ if (!dentry->d_inode) {
+ gossip_debug(GOSSIP_DCACHE_DEBUG,
+ "%s: negative dentry or positive dentry and inode valid.\n",
+ __func__);
+ return 1;
+ }
/* Now we must perform a getattr to validate the inode contents. */
@@ -129,14 +133,7 @@ static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)
__FILE__, __func__, __LINE__);
return 0;
}
- if (ret == 0)
- return 0;
-
-out:
- gossip_debug(GOSSIP_DCACHE_DEBUG,
- "%s: negative dentry or positive dentry and inode valid.\n",
- __func__);
- return 1;
+ return !ret;
}
const struct dentry_operations orangefs_dentry_operations = {
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index c98bba2dbc94..6e3134e6d98a 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -41,7 +41,7 @@ static int orangefs_create(struct inode *dir,
ORANGEFS_TYPE_METAFILE, mode);
strncpy(new_op->upcall.req.create.d_name,
- dentry->d_name.name, ORANGEFS_NAME_MAX);
+ dentry->d_name.name, ORANGEFS_NAME_MAX - 1);
ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
@@ -142,7 +142,7 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
new_op->upcall.req.lookup.parent_refn = parent->refn;
strncpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name,
- ORANGEFS_NAME_MAX);
+ ORANGEFS_NAME_MAX - 1);
gossip_debug(GOSSIP_NAME_DEBUG,
"%s: doing lookup on %s under %pU,%d\n",
@@ -244,7 +244,7 @@ static int orangefs_unlink(struct inode *dir, struct dentry *dentry)
new_op->upcall.req.remove.parent_refn = parent->refn;
strncpy(new_op->upcall.req.remove.d_name, dentry->d_name.name,
- ORANGEFS_NAME_MAX);
+ ORANGEFS_NAME_MAX - 1);
ret = service_operation(new_op, "orangefs_unlink",
get_interruptible_flag(inode));
@@ -300,8 +300,8 @@ static int orangefs_symlink(struct inode *dir,
strncpy(new_op->upcall.req.sym.entry_name,
dentry->d_name.name,
- ORANGEFS_NAME_MAX);
- strncpy(new_op->upcall.req.sym.target, symname, ORANGEFS_NAME_MAX);
+ ORANGEFS_NAME_MAX - 1);
+ strncpy(new_op->upcall.req.sym.target, symname, ORANGEFS_NAME_MAX - 1);
ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
@@ -372,7 +372,7 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
ORANGEFS_TYPE_DIRECTORY, mode);
strncpy(new_op->upcall.req.mkdir.d_name,
- dentry->d_name.name, ORANGEFS_NAME_MAX);
+ dentry->d_name.name, ORANGEFS_NAME_MAX - 1);
ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
@@ -453,10 +453,10 @@ static int orangefs_rename(struct inode *old_dir,
strncpy(new_op->upcall.req.rename.d_old_name,
old_dentry->d_name.name,
- ORANGEFS_NAME_MAX);
+ ORANGEFS_NAME_MAX - 1);
strncpy(new_op->upcall.req.rename.d_new_name,
new_dentry->d_name.name,
- ORANGEFS_NAME_MAX);
+ ORANGEFS_NAME_MAX - 1);
ret = service_operation(new_op,
"orangefs_rename",
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
index 1c59dff530de..6e35f2f3c897 100644
--- a/fs/orangefs/orangefs-debugfs.c
+++ b/fs/orangefs/orangefs-debugfs.c
@@ -328,7 +328,7 @@ static int help_show(struct seq_file *m, void *v)
/*
* initialize the client-debug file.
*/
-int orangefs_client_debug_init(void)
+static int orangefs_client_debug_init(void)
{
int rc = -ENOMEM;
@@ -1056,7 +1056,7 @@ int orangefs_debugfs_new_debug(void __user *arg)
client_debug_string,
llu(mask_info.mask_value));
} else {
- gossip_lerr("Invalid mask type....\n");
+ gossip_err("Invalid mask type....\n");
return -EINVAL;
}
diff --git a/fs/orangefs/orangefs-debugfs.h b/fs/orangefs/orangefs-debugfs.h
index b5fd9cd4960f..51147f9ce3d6 100644
--- a/fs/orangefs/orangefs-debugfs.h
+++ b/fs/orangefs/orangefs-debugfs.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
int orangefs_debugfs_init(int);
void orangefs_debugfs_cleanup(void);
-int orangefs_client_debug_init(void);
int orangefs_prepare_debugfs_help_string(int);
int orangefs_debugfs_new_client_mask(void __user *);
int orangefs_debugfs_new_client_string(void __user *);
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 2595453fe737..eebbaece85ef 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -56,11 +56,7 @@
#include "orangefs-dev-proto.h"
-#ifdef ORANGEFS_KERNEL_DEBUG
-#define ORANGEFS_DEFAULT_OP_TIMEOUT_SECS 10
-#else
#define ORANGEFS_DEFAULT_OP_TIMEOUT_SECS 20
-#endif
#define ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS 30
@@ -104,11 +100,11 @@ enum orangefs_vfs_op_states {
* orangefs kernel memory related flags
*/
-#if ((defined ORANGEFS_KERNEL_DEBUG) && (defined CONFIG_DEBUG_SLAB))
+#if (defined CONFIG_DEBUG_SLAB)
#define ORANGEFS_CACHE_CREATE_FLAGS SLAB_RED_ZONE
#else
#define ORANGEFS_CACHE_CREATE_FLAGS 0
-#endif /* ((defined ORANGEFS_KERNEL_DEBUG) && (defined CONFIG_DEBUG_SLAB)) */
+#endif
extern int orangefs_init_acl(struct inode *inode, struct inode *dir);
extern const struct xattr_handler *orangefs_xattr_handlers[];
@@ -471,8 +467,6 @@ int orangefs_inode_check_changed(struct inode *inode);
int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr);
-void orangefs_make_bad_inode(struct inode *inode);
-
int orangefs_unmount_sb(struct super_block *sb);
bool orangefs_cancel_op_in_progress(struct orangefs_kernel_op_s *op);
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index 97fe93129f38..ea6256d136d1 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -230,25 +230,42 @@ static int orangefs_inode_type(enum orangefs_ds_type objtype)
return -1;
}
-static int orangefs_inode_is_stale(struct inode *inode, int new,
+static void orangefs_make_bad_inode(struct inode *inode)
+{
+ if (is_root_handle(inode)) {
+ /*
+ * if this occurs, the pvfs2-client-core was killed but we
+ * can't afford to lose the inode operations and such
+ * associated with the root handle in any case.
+ */
+ gossip_debug(GOSSIP_UTILS_DEBUG,
+ "*** NOT making bad root inode %pU\n",
+ get_khandle_from_ino(inode));
+ } else {
+ gossip_debug(GOSSIP_UTILS_DEBUG,
+ "*** making bad inode %pU\n",
+ get_khandle_from_ino(inode));
+ make_bad_inode(inode);
+ }
+}
+
+static int orangefs_inode_is_stale(struct inode *inode,
struct ORANGEFS_sys_attr_s *attrs, char *link_target)
{
struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
int type = orangefs_inode_type(attrs->objtype);
- if (!new) {
- /*
- * If the inode type or symlink target have changed then this
- * inode is stale.
- */
- if (type == -1 || !(inode->i_mode & type)) {
- orangefs_make_bad_inode(inode);
- return 1;
- }
- if (type == S_IFLNK && strncmp(orangefs_inode->link_target,
- link_target, ORANGEFS_NAME_MAX)) {
- orangefs_make_bad_inode(inode);
- return 1;
- }
+ /*
+ * If the inode type or symlink target have changed then this
+ * inode is stale.
+ */
+ if (type == -1 || !(inode->i_mode & type)) {
+ orangefs_make_bad_inode(inode);
+ return 1;
+ }
+ if (type == S_IFLNK && strncmp(orangefs_inode->link_target,
+ link_target, ORANGEFS_NAME_MAX)) {
+ orangefs_make_bad_inode(inode);
+ return 1;
}
return 0;
}
@@ -294,16 +311,18 @@ int orangefs_inode_getattr(struct inode *inode, int new, int bypass,
if (ret != 0)
goto out;
- type = orangefs_inode_type(new_op->
- downcall.resp.getattr.attributes.objtype);
- ret = orangefs_inode_is_stale(inode, new,
- &new_op->downcall.resp.getattr.attributes,
- new_op->downcall.resp.getattr.link_target);
- if (ret) {
- ret = -ESTALE;
- goto out;
+ if (!new) {
+ ret = orangefs_inode_is_stale(inode,
+ &new_op->downcall.resp.getattr.attributes,
+ new_op->downcall.resp.getattr.link_target);
+ if (ret) {
+ ret = -ESTALE;
+ goto out;
+ }
}
+ type = orangefs_inode_type(new_op->
+ downcall.resp.getattr.attributes.objtype);
switch (type) {
case S_IFREG:
inode->i_flags = orangefs_inode_flags(&new_op->
@@ -348,6 +367,12 @@ int orangefs_inode_getattr(struct inode *inode, int new, int bypass,
inode->i_link = orangefs_inode->link_target;
}
break;
+ /* i.e. -1 */
+ default:
+ /* XXX: ESTALE? This is what is done if it is not new. */
+ orangefs_make_bad_inode(inode);
+ ret = -ESTALE;
+ goto out;
}
inode->i_uid = make_kuid(&init_user_ns, new_op->
@@ -401,7 +426,7 @@ int orangefs_inode_check_changed(struct inode *inode)
if (ret != 0)
goto out;
- ret = orangefs_inode_is_stale(inode, 0,
+ ret = orangefs_inode_is_stale(inode,
&new_op->downcall.resp.getattr.attributes,
new_op->downcall.resp.getattr.link_target);
out:
@@ -444,25 +469,6 @@ int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr)
return ret;
}
-void orangefs_make_bad_inode(struct inode *inode)
-{
- if (is_root_handle(inode)) {
- /*
- * if this occurs, the pvfs2-client-core was killed but we
- * can't afford to lose the inode operations and such
- * associated with the root handle in any case.
- */
- gossip_debug(GOSSIP_UTILS_DEBUG,
- "*** NOT making bad root inode %pU\n",
- get_khandle_from_ino(inode));
- } else {
- gossip_debug(GOSSIP_UTILS_DEBUG,
- "*** making bad inode %pU\n",
- get_khandle_from_ino(inode));
- make_bad_inode(inode);
- }
-}
-
/*
* The following is a very dirty hack that is now a permanent part of the
* ORANGEFS protocol. See protocol.h for more error definitions.
@@ -537,6 +543,7 @@ int orangefs_normalize_to_errno(__s32 error_code)
*/
} else {
gossip_err("orangefs: orangefs_normalize_to_errno: got error code which is not from ORANGEFS.\n");
+ error_code = -EINVAL;
}
return error_code;
}
diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h
index e0bf5e4dce0d..dc6e3e6269c3 100644
--- a/fs/orangefs/protocol.h
+++ b/fs/orangefs/protocol.h
@@ -395,13 +395,6 @@ struct ORANGEFS_dev_map_desc {
/* gossip.h *****************************************************************/
-#ifdef GOSSIP_DISABLE_DEBUG
-#define gossip_debug(mask, fmt, ...) \
-do { \
- if (0) \
- printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
-} while (0)
-#else
extern __u64 orangefs_gossip_debug_mask;
/* try to avoid function call overhead by checking masks in macro */
@@ -410,13 +403,5 @@ do { \
if (orangefs_gossip_debug_mask & (mask)) \
printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
} while (0)
-#endif /* GOSSIP_DISABLE_DEBUG */
-
-/* do file and line number printouts w/ the GNU preprocessor */
-#define gossip_ldebug(mask, fmt, ...) \
- gossip_debug(mask, "%s: " fmt, __func__, ##__VA_ARGS__)
#define gossip_err pr_err
-#define gossip_lerr(fmt, ...) \
- gossip_err("%s line %d: " fmt, \
- __FILE__, __LINE__, ##__VA_ARGS__)
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 62d49e53061c..3ae5fdba0225 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -335,7 +335,7 @@ static int orangefs_encode_fh(struct inode *inode,
struct orangefs_object_kref refn;
if (*max_len < len) {
- gossip_lerr("fh buffer is too small for encoding\n");
+ gossip_err("fh buffer is too small for encoding\n");
*max_len = len;
type = 255;
goto out;
@@ -383,7 +383,7 @@ static int orangefs_unmount(int id, __s32 fs_id, const char *devname)
op->upcall.req.fs_umount.id = id;
op->upcall.req.fs_umount.fs_id = fs_id;
strncpy(op->upcall.req.fs_umount.orangefs_config_server,
- devname, ORANGEFS_MAX_SERVER_ADDR_LEN);
+ devname, ORANGEFS_MAX_SERVER_ADDR_LEN - 1);
r = service_operation(op, "orangefs_fs_umount", 0);
/* Not much to do about an error here. */
if (r)
@@ -478,7 +478,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
strncpy(new_op->upcall.req.fs_mount.orangefs_config_server,
devname,
- ORANGEFS_MAX_SERVER_ADDR_LEN);
+ ORANGEFS_MAX_SERVER_ADDR_LEN - 1);
gossip_debug(GOSSIP_SUPER_DEBUG,
"Attempting ORANGEFS Mount via host %s\n",
@@ -520,7 +520,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
*/
strncpy(ORANGEFS_SB(sb)->devname,
devname,
- ORANGEFS_MAX_SERVER_ADDR_LEN);
+ ORANGEFS_MAX_SERVER_ADDR_LEN - 1);
/* mount_pending must be cleared */
ORANGEFS_SB(sb)->mount_pending = 0;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 4be761c1a03d..eea09f6d8830 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -181,8 +181,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
* if request is to read from zero offset, reset iterator to first
* record as it might have been already advanced by previous requests
*/
- if (*ppos == 0)
+ if (*ppos == 0) {
m->index = 0;
+ m->version = 0;
+ m->count = 0;
+ }
/* Don't assume *ppos is where we left it */
if (unlikely(*ppos != m->read_pos)) {
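
Resetting m->version and m->count along with m->index discards output buffered by an earlier partial read, so seeking back to offset 0 re-runs the iterator from the first record instead of replaying stale bytes. A userspace sketch that exercises the rewind; the proc file chosen is arbitrary:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t first, rewound;
	int fd = open("/proc/self/status", O_RDONLY);

	if (fd < 0)
		return 1;
	first = read(fd, buf, 16);		/* consume a little */
	rewound = pread(fd, buf, sizeof(buf), 0); /* then rewind to 0 */
	/* With the fix, the pread restarts at the first record rather
	 * than first flushing leftovers of the initial read. */
	printf("%zd %zd\n", first, rewound);
	close(fd);
	return 0;
}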
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index c77b91ff1149..f2eac81bfbd2 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index ffe364fa4040..1adcda4bfb54 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 3c46f0ef5f7a..bb3e746dad34 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index d8dd3bf51ca7..d497b9bcd84e 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index c2e664e74075..6eceb69cffc4 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index 0887d7cbb7e1..fae98927052f 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index d5c0f5153c4e..d6345e9870a1 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index c589c3e12d90..c2bf1255f5aa 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20171215
+#define ACPI_CA_VERSION 0x20180105
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index 343dbdcef20c..1becc88da82f 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 89509b86cb54..17c4d1fdc88d 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 4c304bf4d591..a398d5938f34 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 0d60d5df14f8..366a491f2af0 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 5bde2e700530..ebad40eda9b7 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 31f1be74dd16..310b542abe23 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -485,7 +485,7 @@ typedef u8 acpi_owner_id;
/*
* Constants with special meanings
*/
-#define ACPI_ROOT_OBJECT ACPI_ADD_PTR (acpi_handle, NULL, ACPI_MAX_PTR)
+#define ACPI_ROOT_OBJECT ((acpi_handle) ACPI_TO_POINTER (ACPI_MAX_PTR))
#define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */
#define ACPI_DO_NOT_WAIT 0
@@ -532,13 +532,13 @@ typedef u64 acpi_integer;
#define ACPI_CAST_INDIRECT_PTR(t, p) ((t **) (acpi_uintptr_t) (p))
#define ACPI_ADD_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) + (acpi_size)(b)))
#define ACPI_SUB_PTR(t, a, b) ACPI_CAST_PTR (t, (ACPI_CAST_PTR (u8, (a)) - (acpi_size)(b)))
-#define ACPI_PTR_DIFF(a, b) (acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b)))
+#define ACPI_PTR_DIFF(a, b) ((acpi_size) (ACPI_CAST_PTR (u8, (a)) - ACPI_CAST_PTR (u8, (b))))
/* Pointer/Integer type conversions */
-#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) NULL,(acpi_size) i)
-#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) NULL)
-#define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) NULL)
+#define ACPI_TO_POINTER(i) ACPI_ADD_PTR (void, (void *) 0, (acpi_size) (i))
+#define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) 0)
+#define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0)
#define ACPI_PHYSADDR_TO_PTR(i) ACPI_TO_POINTER(i)
#define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i)
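
With ACPI_TO_POINTER now built from a plain (void *) 0 rather than NULL, ACPI_OFFSET still computes a field offset the way offsetof() does. A simplified stand-in for the casting helpers above:

#include <stddef.h>
#include <stdio.h>

#define PTR_DIFF_SKETCH(a, b)	((size_t)((char *)(a) - (char *)(b)))
#define OFFSET_SKETCH(d, f)	PTR_DIFF_SKETCH(&(((d *)0)->f), (void *)0)

struct demo {
	int a;
	long b;
};

int main(void)
{
	/* Both lines print the same offset. */
	printf("%zu\n", OFFSET_SKETCH(struct demo, b));
	printf("%zu\n", offsetof(struct demo, b));
	return 0;
}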
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index b1a0a8a64c3d..f0ba9bca2b12 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 043fd559de6e..c33ec562b585 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 127c848a1ba7..c4b1b110aeb9 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 9c8f8b79644e..666256f6c97d 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h
index 4f701b288cec..e7baa58b0e31 100644
--- a/include/acpi/platform/acgccex.h
+++ b/include/acpi/platform/acgccex.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h
index bdb6858e2458..f0c5be8ba4d5 100644
--- a/include/acpi/platform/acintel.h
+++ b/include/acpi/platform/acintel.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 1b473efd9eb6..adee92c38c43 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -206,7 +206,7 @@
#define ACPI_FLUSH_CPU_CACHE()
#define ACPI_CAST_PTHREAD_T(pthread) ((acpi_thread_id) (pthread))
-#if defined(__ia64__) || defined(__x86_64__) ||\
+#if defined(__ia64__) || (defined(__x86_64__) && !defined(__ILP32__)) ||\
defined(__aarch64__) || defined(__PPC64__) ||\
defined(__s390x__)
#define ACPI_MACHINE_WIDTH 64
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index efdff527f8fc..b066d75a9359 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 9da6ce22803f..6502feb9524b 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -90,6 +90,8 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
void kvm_timer_init_vhe(void);
+bool kvm_arch_timer_get_input_level(int vintid);
+
#define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer)
#define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer)
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
new file mode 100644
index 000000000000..e518e4e3dfb5
--- /dev/null
+++ b/include/kvm/arm_psci.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __KVM_ARM_PSCI_H__
+#define __KVM_ARM_PSCI_H__
+
+#include <linux/kvm_host.h>
+#include <uapi/linux/psci.h>
+
+#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1)
+#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
+#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0)
+
+#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0
+
+/*
+ * We need the KVM pointer independently of the vcpu, as this can be
+ * called from HYP, where kern_hyp_va must be applied to it.
+ */
+static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
+{
+ /*
+ * Our PSCI implementation stays the same across versions from
+ * v0.2 onward, only adding the few mandatory functions (such
+ * as FEATURES with 1.0) that are required by newer
+ * revisions. It is thus safe to return the latest.
+ */
+ if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+ return KVM_ARM_PSCI_LATEST;
+
+ return KVM_ARM_PSCI_0_1;
+}
+
+
+int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_ARM_PSCI_H__ */
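
A short sketch of how this helper is meant to be consumed. Because PSCI_VERSION() packs the major revision into the high bits, a plain numeric comparison orders versions correctly (the vcpu here is an illustrative local):

    if (kvm_psci_version(vcpu, vcpu->kvm) >= KVM_ARM_PSCI_1_0) {
            /* guest negotiated PSCI >= 1.0: PSCI_FEATURES is available */
    }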
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 8c896540a72c..cdbd142ca7f2 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -130,6 +130,17 @@ struct vgic_irq {
u8 priority;
enum vgic_irq_config config; /* Level or edge */
+ /*
+ * Callback function pointer for in-kernel devices that can tell us the
+ * state of the input level of a mapped level-triggered IRQ faster than
+ * peeking into the physical GIC.
+ *
+ * Always called in a non-preemptible section; the callback can use
+ * kvm_arm_get_running_vcpu() to get the vcpu pointer for private
+ * IRQs.
+ */
+ bool (*get_input_level)(int vintid);
+
void *owner; /* Opaque pointer to reserve an interrupt
for in-kernel devices. */
};
@@ -331,7 +342,7 @@ void kvm_vgic_init_cpu_hardware(void);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level, void *owner);
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
- u32 vintid);
+ u32 vintid, bool (*get_input_level)(int vintid));
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
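
With the extra parameter, a caller registers the level callback at map time. A hedged sketch of the intended wiring, using the arch-timer callback declared above (vcpu, host_vtimer_irq and vtimer are illustrative locals):

    ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq,
                                kvm_arch_timer_get_input_level);
    if (ret)
            return ret;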
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index e6d41b65d396..64e10746f282 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1263,9 +1263,12 @@ static inline bool acpi_has_watchdog(void) { return false; }
#ifdef CONFIG_ACPI_SPCR_TABLE
extern bool qdf2400_e44_present;
-int parse_spcr(bool earlycon);
+int acpi_parse_spcr(bool enable_earlycon, bool enable_console);
#else
-static inline int parse_spcr(bool earlycon) { return 0; }
+static inline int acpi_parse_spcr(bool enable_earlycon, bool enable_console)
+{
+ return 0;
+}
#endif
#if IS_ENABLED(CONFIG_ACPI_GENERIC_GSI)
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 4c5bca38c653..a031897fca76 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -14,14 +14,16 @@
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
+#include <uapi/linux/const.h>
+
/*
* This file provides common defines for ARM SMC Calling Convention as
* specified in
* http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
*/
-#define ARM_SMCCC_STD_CALL 0
-#define ARM_SMCCC_FAST_CALL 1
+#define ARM_SMCCC_STD_CALL _AC(0,U)
+#define ARM_SMCCC_FAST_CALL _AC(1,U)
#define ARM_SMCCC_TYPE_SHIFT 31
#define ARM_SMCCC_SMC_32 0
@@ -60,6 +62,24 @@
#define ARM_SMCCC_QUIRK_NONE 0
#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
+#define ARM_SMCCC_VERSION_1_0 0x10000
+#define ARM_SMCCC_VERSION_1_1 0x10001
+
+#define ARM_SMCCC_VERSION_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0)
+
+#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 1)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_1 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x8000)
+
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
@@ -130,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__)
+/* SMCCC v1.1 implementation madness follows */
+#ifdef CONFIG_ARM64
+
+#define SMCCC_SMC_INST "smc #0"
+#define SMCCC_HVC_INST "hvc #0"
+
+#elif defined(CONFIG_ARM)
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+
+#define SMCCC_SMC_INST __SMC(0)
+#define SMCCC_HVC_INST __HVC(0)
+
+#endif
+
+#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
+
+#define __count_args(...) \
+ ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
+
+#define __constraint_write_0 \
+ "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_1 \
+ "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
+#define __constraint_write_2 \
+ "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
+#define __constraint_write_3 \
+ "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
+#define __constraint_write_4 __constraint_write_3
+#define __constraint_write_5 __constraint_write_4
+#define __constraint_write_6 __constraint_write_5
+#define __constraint_write_7 __constraint_write_6
+
+#define __constraint_read_0
+#define __constraint_read_1
+#define __constraint_read_2
+#define __constraint_read_3
+#define __constraint_read_4 "r" (r4)
+#define __constraint_read_5 __constraint_read_4, "r" (r5)
+#define __constraint_read_6 __constraint_read_5, "r" (r6)
+#define __constraint_read_7 __constraint_read_6, "r" (r7)
+
+#define __declare_arg_0(a0, res) \
+ struct arm_smccc_res *___res = res; \
+ register u32 r0 asm("r0") = a0; \
+ register unsigned long r1 asm("r1"); \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3")
+
+#define __declare_arg_1(a0, a1, res) \
+ struct arm_smccc_res *___res = res; \
+ register u32 r0 asm("r0") = a0; \
+ register typeof(a1) r1 asm("r1") = a1; \
+ register unsigned long r2 asm("r2"); \
+ register unsigned long r3 asm("r3")
+
+#define __declare_arg_2(a0, a1, a2, res) \
+ struct arm_smccc_res *___res = res; \
+ register u32 r0 asm("r0") = a0; \
+ register typeof(a1) r1 asm("r1") = a1; \
+ register typeof(a2) r2 asm("r2") = a2; \
+ register unsigned long r3 asm("r3")
+
+#define __declare_arg_3(a0, a1, a2, a3, res) \
+ struct arm_smccc_res *___res = res; \
+ register u32 r0 asm("r0") = a0; \
+ register typeof(a1) r1 asm("r1") = a1; \
+ register typeof(a2) r2 asm("r2") = a2; \
+ register typeof(a3) r3 asm("r3") = a3
+
+#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+ __declare_arg_3(a0, a1, a2, a3, res); \
+ register typeof(a4) r4 asm("r4") = a4
+
+#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+ __declare_arg_4(a0, a1, a2, a3, a4, res); \
+ register typeof(a5) r5 asm("r5") = a5
+
+#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+ __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
+ register typeof(a6) r6 asm("r6") = a6
+
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+ __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
+ register typeof(a7) r7 asm("r7") = a7
+
+#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
+#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
+
+#define ___constraints(count) \
+ : __constraint_write_ ## count \
+ : __constraint_read_ ## count \
+ : "memory"
+#define __constraints(count) ___constraints(count)
+
+/*
+ * We have an output list that is not necessarily used, and GCC feels
+ * entitled to optimise the whole sequence away. "volatile" is what
+ * makes it stick.
+ */
+#define __arm_smccc_1_1(inst, ...) \
+ do { \
+ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
+ asm volatile(inst "\n" \
+ __constraints(__count_args(__VA_ARGS__))); \
+ if (___res) \
+ *___res = (typeof(*___res)){r0, r1, r2, r3}; \
+ } while (0)
+
+/*
+ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make SMC calls following SMC Calling Convention v1.1.
+ * The contents of the supplied parameters are copied to registers 0 to 7
+ * prior to the SMC instruction. If @res is not NULL, it is updated with
+ * the contents of registers 0 to 3 on return from the SMC instruction.
+ */
+#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
+
+/*
+ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro is used to make HVC calls following SMC Calling Convention v1.1.
+ * The contents of the supplied parameters are copied to registers 0 to 7
+ * prior to the HVC instruction. If @res is not NULL, it is updated with
+ * the contents of registers 0 to 3 on return from the HVC instruction.
+ */
+#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
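
As a usage sketch (not part of the patch), the v1.1 wrappers are typically used to probe for a workaround via ARCH_FEATURES and then invoke it; passing NULL as the result pointer suppresses the copy-back:

    struct arm_smccc_res res;

    arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                      ARM_SMCCC_ARCH_WORKAROUND_1, &res);
    if ((int)res.a0 >= 0)
            /* supported: fire the mitigation, discarding the result */
            arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);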
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 065f3a8eb486..21e8d248d956 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -629,6 +629,18 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++)
/*
+ * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table
+ * with index
+ * @pos: the cpufreq_frequency_table * to use as a loop cursor.
+ * @table: the cpufreq_frequency_table * to iterate over.
+ * @idx: the table entry currently being processed
+ */
+
+#define cpufreq_for_each_entry_idx(pos, table, idx) \
+ for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \
+ pos++, idx++)
+
+/*
* cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table
* excluding CPUFREQ_ENTRY_INVALID frequencies.
* @pos: the cpufreq_frequency_table * to use as a loop cursor.
@@ -641,6 +653,21 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
continue; \
else
+/*
+ * cpufreq_for_each_valid_entry_idx - iterate with index over a
+ * cpufreq_frequency_table, excluding CPUFREQ_ENTRY_INVALID frequencies.
+ * @pos: the cpufreq_frequency_table * to use as a loop cursor.
+ * @table: the cpufreq_frequency_table * to iterate over.
+ * @idx: the table entry currently being processed
+ */
+
+#define cpufreq_for_each_valid_entry_idx(pos, table, idx) \
+ cpufreq_for_each_entry_idx(pos, table, idx) \
+ if (pos->frequency == CPUFREQ_ENTRY_INVALID) \
+ continue; \
+ else
+
+
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
@@ -667,19 +694,20 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) {
freq = pos->frequency;
if (freq >= target_freq)
- return pos - table;
+ return idx;
- best = pos;
+ best = idx;
}
- return best - table;
+ return best;
}
/* Find lowest freq at or above target in a table in descending order */
@@ -687,28 +715,29 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq > target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found above target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Works only on sorted freq-tables */
@@ -728,28 +757,29 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq < target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found below target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Find highest freq at or below target in a table in descending order */
@@ -757,19 +787,20 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) {
freq = pos->frequency;
if (freq <= target_freq)
- return pos - table;
+ return idx;
- best = pos;
+ best = idx;
}
- return best - table;
+ return best;
}
/* Works only on sorted freq-tables */
@@ -789,32 +820,33 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq < target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found below target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
/* Choose the closest freq */
- if (target_freq - best->frequency > freq - target_freq)
- return pos - table;
+ if (target_freq - table[best].frequency > freq - target_freq)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Find closest freq to target in a table in descending order */
@@ -822,32 +854,33 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
unsigned int target_freq)
{
struct cpufreq_frequency_table *table = policy->freq_table;
- struct cpufreq_frequency_table *pos, *best = table - 1;
+ struct cpufreq_frequency_table *pos;
unsigned int freq;
+ int idx, best = -1;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, idx) {
freq = pos->frequency;
if (freq == target_freq)
- return pos - table;
+ return idx;
if (freq > target_freq) {
- best = pos;
+ best = idx;
continue;
}
/* No freq found above target_freq */
- if (best == table - 1)
- return pos - table;
+ if (best == -1)
+ return idx;
/* Choose the closest freq */
- if (best->frequency - target_freq > target_freq - freq)
- return pos - table;
+ if (table[best].frequency - target_freq > target_freq - freq)
+ return idx;
- return best - table;
+ return best;
}
- return best - table;
+ return best;
}
/* Works only on sorted freq-tables */
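
A short sketch of the new iterator in driver code (policy is assumed to be a valid struct cpufreq_policy *):

    struct cpufreq_frequency_table *pos;
    int idx;

    /* walk every valid entry together with its stable table index */
    cpufreq_for_each_valid_entry_idx(pos, policy->freq_table, idx)
            pr_debug("entry %d: %u kHz\n", idx, pos->frequency);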
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 263dbcad22fc..79563840c295 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -31,7 +31,7 @@
#ifdef __KERNEL__
struct device;
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
-unsigned char *arch_get_platform_get_mac_address(void);
+unsigned char *arch_get_platform_mac_address(void);
u32 eth_get_headlen(void *data, unsigned int max_len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
diff --git a/include/linux/idr.h b/include/linux/idr.h
index fa14f834e4ed..7d6a6313f0ab 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -15,10 +15,10 @@
#include <linux/radix-tree.h>
#include <linux/gfp.h>
#include <linux/percpu.h>
-#include <linux/bug.h>
struct idr {
struct radix_tree_root idr_rt;
+ unsigned int idr_base;
unsigned int idr_next;
};
@@ -31,10 +31,26 @@ struct idr {
/* Set the IDR flag and the IDR_FREE tag */
#define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT))
-#define IDR_INIT \
-{ \
- .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \
+#define IDR_INIT_BASE(base) { \
+ .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER), \
+ .idr_base = (base), \
+ .idr_next = 0, \
}
+
+/**
+ * IDR_INIT() - Initialise an IDR.
+ *
+ * A freshly-initialised IDR contains no IDs.
+ */
+#define IDR_INIT IDR_INIT_BASE(0)
+
+/**
+ * DEFINE_IDR() - Define a statically-allocated IDR
+ * @name: Name of IDR
+ *
+ * An IDR defined using this macro is ready for use with no additional
+ * initialisation required. It contains no IDs.
+ */
#define DEFINE_IDR(name) struct idr name = IDR_INIT
/**
@@ -82,80 +98,52 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
void idr_preload(gfp_t gfp_mask);
-int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
- unsigned long start, unsigned long end, gfp_t gfp,
- bool ext);
-
-/**
- * idr_alloc - allocate an id
- * @idr: idr handle
- * @ptr: pointer to be associated with the new id
- * @start: the minimum id (inclusive)
- * @end: the maximum id (exclusive)
- * @gfp: memory allocation flags
- *
- * Allocates an unused ID in the range [start, end). Returns -ENOSPC
- * if there are no unused IDs in that range.
- *
- * Note that @end is treated as max when <= 0. This is to always allow
- * using @start + N as @end as long as N is inside integer range.
- *
- * Simultaneous modifications to the @idr are not allowed and should be
- * prevented by the user, usually with a lock. idr_alloc() may be called
- * concurrently with read-only accesses to the @idr, such as idr_find() and
- * idr_for_each_entry().
- */
-static inline int idr_alloc(struct idr *idr, void *ptr,
- int start, int end, gfp_t gfp)
-{
- unsigned long id;
- int ret;
-
- if (WARN_ON_ONCE(start < 0))
- return -EINVAL;
-
- ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false);
-
- if (ret)
- return ret;
-
- return id;
-}
-
-static inline int idr_alloc_ext(struct idr *idr, void *ptr,
- unsigned long *index,
- unsigned long start,
- unsigned long end,
- gfp_t gfp)
-{
- return idr_alloc_cmn(idr, ptr, index, start, end, gfp, true);
-}
-
-int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t);
+int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
+int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id,
+ unsigned long max, gfp_t);
+int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
+void *idr_remove(struct idr *, unsigned long id);
+void *idr_find(const struct idr *, unsigned long id);
int idr_for_each(const struct idr *,
int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *, int *nextid);
-void *idr_get_next_ext(struct idr *idr, unsigned long *nextid);
-void *idr_replace(struct idr *, void *, int id);
-void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id);
+void *idr_get_next_ul(struct idr *, unsigned long *nextid);
+void *idr_replace(struct idr *, void *, unsigned long id);
void idr_destroy(struct idr *);
-static inline void *idr_remove_ext(struct idr *idr, unsigned long id)
-{
- return radix_tree_delete_item(&idr->idr_rt, id, NULL);
-}
-
-static inline void *idr_remove(struct idr *idr, int id)
+/**
+ * idr_init_base() - Initialise an IDR.
+ * @idr: IDR handle.
+ * @base: The base value for the IDR.
+ *
+ * This variation of idr_init() creates an IDR which will allocate IDs
+ * starting at @base.
+ */
+static inline void idr_init_base(struct idr *idr, int base)
{
- return idr_remove_ext(idr, id);
+ INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
+ idr->idr_base = base;
+ idr->idr_next = 0;
}
+/**
+ * idr_init() - Initialise an IDR.
+ * @idr: IDR handle.
+ *
+ * Initialise a dynamically allocated IDR. To initialise a
+ * statically allocated IDR, use DEFINE_IDR().
+ */
static inline void idr_init(struct idr *idr)
{
- INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER);
- idr->idr_next = 0;
+ idr_init_base(idr, 0);
}
+/**
+ * idr_is_empty() - Is this IDR empty?
+ * @idr: IDR handle.
+ *
+ * Return: %true if no IDs have been allocated from this IDR.
+ */
static inline bool idr_is_empty(const struct idr *idr)
{
return radix_tree_empty(&idr->idr_rt) &&
@@ -174,50 +162,38 @@ static inline void idr_preload_end(void)
}
/**
- * idr_find - return pointer for given id
- * @idr: idr handle
- * @id: lookup key
- *
- * Return the pointer given the id it has been registered with. A %NULL
- * return indicates that @id is not valid or you passed %NULL in
- * idr_get_new().
+ * idr_for_each_entry() - Iterate over an IDR's elements of a given type.
+ * @idr: IDR handle.
+ * @entry: The type * to use as cursor
+ * @id: Entry ID.
*
- * This function can be called under rcu_read_lock(), given that the leaf
- * pointers lifetimes are correctly managed.
+ * @entry and @id do not need to be initialized before the loop, and
+ * after normal termination @entry is left with the value NULL. This
+ * is convenient for a "not found" value.
*/
-static inline void *idr_find_ext(const struct idr *idr, unsigned long id)
-{
- return radix_tree_lookup(&idr->idr_rt, id);
-}
-
-static inline void *idr_find(const struct idr *idr, int id)
-{
- return idr_find_ext(idr, id);
-}
+#define idr_for_each_entry(idr, entry, id) \
+ for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
/**
- * idr_for_each_entry - iterate over an idr's elements of a given type
- * @idr: idr handle
- * @entry: the type * to use as cursor
- * @id: id entry's key
+ * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type.
+ * @idr: IDR handle.
+ * @entry: The type * to use as cursor.
+ * @id: Entry ID.
*
* @entry and @id do not need to be initialized before the loop, and
- * after normal terminatinon @entry is left with the value NULL. This
+ * after normal termination @entry is left with the value NULL. This
* is convenient for a "not found" value.
*/
-#define idr_for_each_entry(idr, entry, id) \
- for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id)
-#define idr_for_each_entry_ext(idr, entry, id) \
- for (id = 0; ((entry) = idr_get_next_ext(idr, &(id))) != NULL; ++id)
+#define idr_for_each_entry_ul(idr, entry, id) \
+ for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id)
/**
- * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
- * @idr: idr handle
- * @entry: the type * to use as cursor
- * @id: id entry's key
+ * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type
+ * @idr: IDR handle.
+ * @entry: The type * to use as a cursor.
+ * @id: Entry ID.
*
- * Continue to iterate over list of given type, continuing after
- * the current position.
+ * Continue to iterate over entries, continuing after the current position.
*/
#define idr_for_each_entry_continue(idr, entry, id) \
for ((entry) = idr_get_next((idr), &(id)); \
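
A minimal sketch of the consolidated allocation API (struct session is a made-up type; error handling abbreviated):

    static DEFINE_IDR(session_idr);

    static int session_register(struct session *s)
    {
            int id;

            idr_preload(GFP_KERNEL);
            /* allocate any unused ID in [1, 100) and associate it with s */
            id = idr_alloc(&session_idr, s, 1, 100, GFP_NOWAIT);
            idr_preload_end();

            return id;      /* negative errno, e.g. -ENOSPC, on failure */
    }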
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index f3274d9f46a2..8dad3dd26eae 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -83,7 +83,9 @@
/*
* Decoding Capability Register
*/
+#define cap_5lp_support(c) (((c) >> 60) & 1)
#define cap_pi_support(c) (((c) >> 59) & 1)
+#define cap_fl1gp_support(c) (((c) >> 56) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
#define cap_write_drain(c) (((c) >> 54) & 1)
#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6bdd4b9f6611..ac0062b74aed 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -533,7 +533,7 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
-int __must_check vcpu_load(struct kvm_vcpu *vcpu);
+void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
#ifdef __KVM_HAVE_IOAPIC
@@ -1260,4 +1260,16 @@ static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */
+#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
+long kvm_arch_vcpu_async_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg);
+#else
+static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
+ unsigned int ioctl,
+ unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
+
#endif
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index cddfaff4d0b7..4fa654e4b5a9 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -34,9 +34,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
extern struct of_device_id __iommu_of_table;
-typedef int (*of_iommu_init_fn)(struct device_node *);
-
-#define IOMMU_OF_DECLARE(name, compat, fn) \
- _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)
+#define IOMMU_OF_DECLARE(name, compat) OF_DECLARE_1(iommu, name, compat, NULL)
#endif /* __OF_IOMMU_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index eb13e84e1fef..a6b30667a331 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -149,6 +149,8 @@
#define PCI_VENDOR_ID_DYNALINK 0x0675
#define PCI_DEVICE_ID_DYNALINK_IS64PH 0x1702
+#define PCI_VENDOR_ID_UBIQUITI 0x0777
+
#define PCI_VENDOR_ID_BERKOM 0x0871
#define PCI_DEVICE_ID_BERKOM_A1T 0xffa1
#define PCI_DEVICE_ID_BERKOM_T_CONCEPT 0xffa2
diff --git a/include/linux/psci.h b/include/linux/psci.h
index f724fd8c78e8..8b1b3b5935ab 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -25,6 +25,17 @@ bool psci_tos_resident_on(int cpu);
int psci_cpu_init_idle(unsigned int cpu);
int psci_cpu_suspend_enter(unsigned long index);
+enum psci_conduit {
+ PSCI_CONDUIT_NONE,
+ PSCI_CONDUIT_SMC,
+ PSCI_CONDUIT_HVC,
+};
+
+enum smccc_version {
+ SMCCC_VERSION_1_0,
+ SMCCC_VERSION_1_1,
+};
+
struct psci_operations {
u32 (*get_version)(void);
int (*cpu_suspend)(u32 state, unsigned long entry_point);
@@ -34,6 +45,8 @@ struct psci_operations {
int (*affinity_info)(unsigned long target_affinity,
unsigned long lowest_affinity_level);
int (*migrate_info_type)(void);
+ enum psci_conduit conduit;
+ enum smccc_version smccc_version;
};
extern struct psci_operations psci_ops;
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
new file mode 100644
index 000000000000..93addfa34061
--- /dev/null
+++ b/include/linux/psp-sev.h
@@ -0,0 +1,606 @@
+/*
+ * AMD Secure Encrypted Virtualization (SEV) driver interface
+ *
+ * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * SEV spec 0.14 is available at:
+ * http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PSP_SEV_H__
+#define __PSP_SEV_H__
+
+#include <uapi/linux/psp-sev.h>
+
+#ifdef CONFIG_X86
+#include <linux/mem_encrypt.h>
+
+#define __psp_pa(x) __sme_pa(x)
+#else
+#define __psp_pa(x) __pa(x)
+#endif
+
+#define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */
+
+/**
+ * SEV platform state
+ */
+enum sev_state {
+ SEV_STATE_UNINIT = 0x0,
+ SEV_STATE_INIT = 0x1,
+ SEV_STATE_WORKING = 0x2,
+
+ SEV_STATE_MAX
+};
+
+/**
+ * SEV platform and guest management commands
+ */
+enum sev_cmd {
+ /* platform commands */
+ SEV_CMD_INIT = 0x001,
+ SEV_CMD_SHUTDOWN = 0x002,
+ SEV_CMD_FACTORY_RESET = 0x003,
+ SEV_CMD_PLATFORM_STATUS = 0x004,
+ SEV_CMD_PEK_GEN = 0x005,
+ SEV_CMD_PEK_CSR = 0x006,
+ SEV_CMD_PEK_CERT_IMPORT = 0x007,
+ SEV_CMD_PDH_CERT_EXPORT = 0x008,
+ SEV_CMD_PDH_GEN = 0x009,
+ SEV_CMD_DF_FLUSH = 0x00A,
+
+ /* Guest commands */
+ SEV_CMD_DECOMMISSION = 0x020,
+ SEV_CMD_ACTIVATE = 0x021,
+ SEV_CMD_DEACTIVATE = 0x022,
+ SEV_CMD_GUEST_STATUS = 0x023,
+
+ /* Guest launch commands */
+ SEV_CMD_LAUNCH_START = 0x030,
+ SEV_CMD_LAUNCH_UPDATE_DATA = 0x031,
+ SEV_CMD_LAUNCH_UPDATE_VMSA = 0x032,
+ SEV_CMD_LAUNCH_MEASURE = 0x033,
+ SEV_CMD_LAUNCH_UPDATE_SECRET = 0x034,
+ SEV_CMD_LAUNCH_FINISH = 0x035,
+
+ /* Guest migration commands (outgoing) */
+ SEV_CMD_SEND_START = 0x040,
+ SEV_CMD_SEND_UPDATE_DATA = 0x041,
+ SEV_CMD_SEND_UPDATE_VMSA = 0x042,
+ SEV_CMD_SEND_FINISH = 0x043,
+
+ /* Guest migration commands (incoming) */
+ SEV_CMD_RECEIVE_START = 0x050,
+ SEV_CMD_RECEIVE_UPDATE_DATA = 0x051,
+ SEV_CMD_RECEIVE_UPDATE_VMSA = 0x052,
+ SEV_CMD_RECEIVE_FINISH = 0x053,
+
+ /* Guest debug commands */
+ SEV_CMD_DBG_DECRYPT = 0x060,
+ SEV_CMD_DBG_ENCRYPT = 0x061,
+
+ SEV_CMD_MAX,
+};
+
+/**
+ * struct sev_data_init - INIT command parameters
+ *
+ * @flags: processing flags
+ * @tmr_address: system physical address used for SEV-ES
+ * @tmr_len: len of tmr_address
+ */
+struct sev_data_init {
+ u32 flags; /* In */
+ u32 reserved; /* In */
+ u64 tmr_address; /* In */
+ u32 tmr_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_pek_csr - PEK_CSR command parameters
+ *
+ * @address: PEK certificate chain
+ * @len: len of certificate
+ */
+struct sev_data_pek_csr {
+ u64 address; /* In */
+ u32 len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_pek_cert_import - PEK_CERT_IMPORT command parameters
+ *
+ * @pek_cert_address: PEK certificate chain
+ * @pek_cert_len: len of PEK certificate
+ * @oca_cert_address: OCA certificate chain
+ * @oca_cert_len: len of OCA certificate
+ */
+struct sev_data_pek_cert_import {
+ u64 pek_cert_address; /* In */
+ u32 pek_cert_len; /* In */
+ u32 reserved; /* In */
+ u64 oca_cert_address; /* In */
+ u32 oca_cert_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_pdh_cert_export - PDH_CERT_EXPORT command parameters
+ *
+ * @pdh_address: PDH certificate address
+ * @pdh_len: len of PDH certificate
+ * @cert_chain_address: PDH certificate chain
+ * @cert_chain_len: len of PDH certificate chain
+ */
+struct sev_data_pdh_cert_export {
+ u64 pdh_cert_address; /* In */
+ u32 pdh_cert_len; /* In/Out */
+ u32 reserved; /* In */
+ u64 cert_chain_address; /* In */
+ u32 cert_chain_len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_decommission - DECOMMISSION command parameters
+ *
+ * @handle: handle of the VM to decommission
+ */
+struct sev_data_decommission {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_activate - ACTIVATE command parameters
+ *
+ * @handle: handle of the VM to activate
+ * @asid: asid assigned to the VM
+ */
+struct sev_data_activate {
+ u32 handle; /* In */
+ u32 asid; /* In */
+} __packed;
+
+/**
+ * struct sev_data_deactivate - DEACTIVATE command parameters
+ *
+ * @handle: handle of the VM to deactivate
+ */
+struct sev_data_deactivate {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_guest_status - SEV GUEST_STATUS command parameters
+ *
+ * @handle: handle of the VM to retrieve status
+ * @policy: policy information for the VM
+ * @asid: current ASID of the VM
+ * @state: current state of the VM
+ */
+struct sev_data_guest_status {
+ u32 handle; /* In */
+ u32 policy; /* Out */
+ u32 asid; /* Out */
+ u8 state; /* Out */
+} __packed;
+
+/**
+ * struct sev_data_launch_start - LAUNCH_START command parameters
+ *
+ * @handle: handle assigned to the VM
+ * @policy: guest launch policy
+ * @dh_cert_address: physical address of DH certificate blob
+ * @dh_cert_len: len of DH certificate blob
+ * @session_address: physical address of session parameters
+ * @session_len: len of session parameters
+ */
+struct sev_data_launch_start {
+ u32 handle; /* In/Out */
+ u32 policy; /* In */
+ u64 dh_cert_address; /* In */
+ u32 dh_cert_len; /* In */
+ u32 reserved; /* In */
+ u64 session_address; /* In */
+ u32 session_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_launch_update_data - LAUNCH_UPDATE_DATA command parameters
+ *
+ * @handle: handle of the VM to update
+ * @len: len of memory to be encrypted
+ * @address: physical address of memory region to encrypt
+ */
+struct sev_data_launch_update_data {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u32 len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_launch_update_vmsa - LAUNCH_UPDATE_VMSA command
+ *
+ * @handle: handle of the VM
+ * @address: physical address of memory region to encrypt
+ * @len: len of memory region to encrypt
+ */
+struct sev_data_launch_update_vmsa {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u32 len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_launch_measure - LAUNCH_MEASURE command parameters
+ *
+ * @handle: handle of the VM to process
+ * @address: physical address containing the measurement blob
+ * @len: len of measurement blob
+ */
+struct sev_data_launch_measure {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 address; /* In */
+ u32 len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_launch_secret - LAUNCH_SECRET command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing the packet header
+ * @hdr_len: len of packet header
+ * @guest_address: system physical address of guest memory region
+ * @guest_len: len of guest memory region
+ * @trans_address: physical address of transport memory buffer
+ * @trans_len: len of transport memory buffer
+ */
+struct sev_data_launch_secret {
+ u32 handle; /* In */
+ u32 reserved1;
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_launch_finish - LAUNCH_FINISH command parameters
+ *
+ * @handle: handle of the VM to process
+ */
+struct sev_data_launch_finish {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_send_start - SEND_START command parameters
+ *
+ * @handle: handle of the VM to process
+ * @policy: policy information for the VM
+ * @pdh_cert_address: physical address containing PDH certificate
+ * @pdh_cert_len: len of PDH certificate
+ * @plat_certs_address: physical address containing platform certificate
+ * @plat_certs_len: len of platform certificate
+ * @amd_certs_address: physical address containing AMD certificate
+ * @amd_certs_len: len of AMD certificate
+ * @session_address: physical address containing Session data
+ * @session_len: len of session data
+ */
+struct sev_data_send_start {
+ u32 handle; /* In */
+ u32 policy; /* Out */
+ u64 pdh_cert_address; /* In */
+ u32 pdh_cert_len; /* In */
+ u32 reserved1;
+ u64 plat_cert_address; /* In */
+ u32 plat_cert_len; /* In */
+ u32 reserved2;
+ u64 amd_cert_address; /* In */
+ u32 amd_cert_len; /* In */
+ u32 reserved3;
+ u64 session_address; /* In */
+ u32 session_len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_data_send_update_data - SEND_UPDATE_DATA command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing packet header
+ * @hdr_len: len of packet header
+ * @guest_address: physical address of guest memory region to send
+ * @guest_len: len of guest memory region to send
+ * @trans_address: physical address of host memory region
+ * @trans_len: len of host memory region
+ */
+struct sev_data_send_update_data {
+ u32 handle; /* In */
+ u32 reserved1;
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In/Out */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_send_update_vmsa - SEND_UPDATE_VMSA command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing packet header
+ * @hdr_len: len of packet header
+ * @guest_address: physical address of guest memory region to send
+ * @guest_len: len of guest memory region to send
+ * @trans_address: physical address of host memory region
+ * @trans_len: len of host memory region
+ */
+struct sev_data_send_update_vmsa {
+ u32 handle; /* In */
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In/Out */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_send_finish - SEND_FINISH command parameters
+ *
+ * @handle: handle of the VM to process
+ */
+struct sev_data_send_finish {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_receive_start - RECEIVE_START command parameters
+ *
+ * @handle: handle of the VM to perform receive operation
+ * @policy: policy information for the VM
+ * @pdh_cert_address: system physical address containing PDH certificate blob
+ * @pdh_cert_len: len of PDH certificate blob
+ * @session_address: system physical address containing session blob
+ * @session_len: len of session blob
+ */
+struct sev_data_receive_start {
+ u32 handle; /* In/Out */
+ u32 policy; /* In */
+ u64 pdh_cert_address; /* In */
+ u32 pdh_cert_len; /* In */
+ u32 reserved1;
+ u64 session_address; /* In */
+ u32 session_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_receive_update_data - RECEIVE_UPDATE_DATA command parameters
+ *
+ * @handle: handle of the VM to update
+ * @hdr_address: physical address containing packet header blob
+ * @hdr_len: len of packet header
+ * @guest_address: system physical address of guest memory region
+ * @guest_len: len of guest memory region
+ * @trans_address: system physical address of transport buffer
+ * @trans_len: len of transport buffer
+ */
+struct sev_data_receive_update_data {
+ u32 handle; /* In */
+ u32 reserved1;
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_receive_update_vmsa - RECEIVE_UPDATE_VMSA command parameters
+ *
+ * @handle: handle of the VM to update
+ * @hdr_address: physical address containing packet header blob
+ * @hdr_len: len of packet header
+ * @guest_address: system physical address of guest memory region
+ * @guest_len: len of guest memory region
+ * @trans_address: system physical address of transport buffer
+ * @trans_len: len of transport buffer
+ */
+struct sev_data_receive_update_vmsa {
+ u32 handle; /* In */
+ u32 reserved1;
+ u64 hdr_address; /* In */
+ u32 hdr_len; /* In */
+ u32 reserved2;
+ u64 guest_address; /* In */
+ u32 guest_len; /* In */
+ u32 reserved3;
+ u64 trans_address; /* In */
+ u32 trans_len; /* In */
+} __packed;
+
+/**
+ * struct sev_data_receive_finish - RECEIVE_FINISH command parameters
+ *
+ * @handle: handle of the VM to finish
+ */
+struct sev_data_receive_finish {
+ u32 handle; /* In */
+} __packed;
+
+/**
+ * struct sev_data_dbg - DBG_ENCRYPT/DBG_DECRYPT command parameters
+ *
+ * @handle: handle of the VM to perform debug operation
+ * @src_addr: source address of data to operate on
+ * @dst_addr: destination address of data to operate on
+ * @len: len of data to operate on
+ */
+struct sev_data_dbg {
+ u32 handle; /* In */
+ u32 reserved;
+ u64 src_addr; /* In */
+ u64 dst_addr; /* In */
+ u32 len; /* In */
+} __packed;
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+
+/**
+ * sev_platform_init - perform SEV INIT command
+ *
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV device successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the SEV device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV device returned a non-zero return code
+ */
+int sev_platform_init(int *error);
+
+/**
+ * sev_platform_status - perform SEV PLATFORM_STATUS command
+ *
+ * @status: sev_user_data_status structure to be processed
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV device successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the SEV device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV device returned a non-zero return code
+ */
+int sev_platform_status(struct sev_user_data_status *status, int *error);
+
+/**
+ * sev_issue_cmd_external_user - issue a SEV command on behalf of another
+ * driver, using an SEV file handle.
+ *
+ * This function can be used by other drivers to issue a SEV command on
+ * behalf of userspace. The caller must pass a valid SEV file descriptor
+ * so that we know that it has access to the SEV device.
+ *
+ * @filep - SEV device file pointer
+ * @cmd - command to issue
+ * @data - command buffer
+ * @error: SEV command return code
+ *
+ * Returns:
+ * 0 if the SEV device successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the SEV device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV device returned a non-zero return code
+ * -%EINVAL if the SEV file descriptor is not valid
+ */
+int sev_issue_cmd_external_user(struct file *filep, unsigned int id,
+ void *data, int *error);
+
+/**
+ * sev_guest_deactivate - perform SEV DEACTIVATE command
+ *
+ * @deactivate: sev_data_deactivate structure to be processed
+ * @sev_ret: sev command return code
+ *
+ * Returns:
+ * 0 if the SEV device successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the SEV device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV device returned a non-zero return code
+ */
+int sev_guest_deactivate(struct sev_data_deactivate *data, int *error);
+
+/**
+ * sev_guest_activate - perform SEV ACTIVATE command
+ *
+ * @activate: sev_data_activate structure to be processed
+ * @sev_ret: sev command return code
+ *
+ * Returns:
+ * 0 if the SEV device successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the SEV device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV device returned a non-zero return code
+ */
+int sev_guest_activate(struct sev_data_activate *data, int *error);
+
+/**
+ * sev_guest_df_flush - perform SEV DF_FLUSH command
+ *
+ * @sev_ret: sev command return code
+ *
+ * Returns:
+ * 0 if the SEV device successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the SEV device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV device returned a non-zero return code
+ */
+int sev_guest_df_flush(int *error);
+
+/**
+ * sev_guest_decommission - perform SEV DECOMMISSION command
+ *
+ * @decommission: sev_data_decommission structure to be processed
+ * @sev_ret: sev command return code
+ *
+ * Returns:
+ * 0 if the SEV device successfully processed the command
+ * -%ENODEV if the SEV device is not available
+ * -%ENOTSUPP if the SEV device does not support SEV
+ * -%ETIMEDOUT if the SEV command timed out
+ * -%EIO if the SEV device returned a non-zero return code
+ */
+int sev_guest_decommission(struct sev_data_decommission *data, int *error);
+
+void *psp_copy_user_blob(u64 __user uaddr, u32 len);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+static inline int
+sev_platform_status(struct sev_user_data_status *status, int *error) { return -ENODEV; }
+
+static inline int sev_platform_init(int *error) { return -ENODEV; }
+
+static inline int
+sev_guest_deactivate(struct sev_data_deactivate *data, int *error) { return -ENODEV; }
+
+static inline int
+sev_guest_decommission(struct sev_data_decommission *data, int *error) { return -ENODEV; }
+
+static inline int
+sev_guest_activate(struct sev_data_activate *data, int *error) { return -ENODEV; }
+
+static inline int sev_guest_df_flush(int *error) { return -ENODEV; }
+
+static inline int
+sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int *error) { return -ENODEV; }
+
+static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); }
+
+#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+
+#endif /* __PSP_SEV_H__ */
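
For context, a hedged sketch of how another kernel driver would consume this interface; the pr_err() message text is illustrative:

    int rc, error;

    /* move the PSP firmware to the INIT state; "error" receives the
     * SEV firmware status code on failure */
    rc = sev_platform_init(&error);
    if (rc)
            pr_err("SEV: INIT failed, rc=%d fw_error=%d\n", rc, error);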
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 1883d6137e9b..b884b7794187 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -464,9 +464,14 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
__PTR_RING_PEEK_CALL_v; \
})
+/* Not all gfp_t flags are allowed here (GFP_KERNEL always is). See
+ * the vmalloc documentation for which of them are legal.
+ */
static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
- return kcalloc(size, sizeof(void *), gfp);
+ if (size * sizeof(void *) > KMALLOC_MAX_SIZE)
+ return NULL;
+ return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
}
static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
@@ -601,7 +606,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
spin_unlock(&(r)->producer_lock);
spin_unlock_irqrestore(&(r)->consumer_lock, flags);
- kfree(old);
+ kvfree(old);
return 0;
}
@@ -641,7 +646,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
}
for (i = 0; i < nrings; ++i)
- kfree(queues[i]);
+ kvfree(queues[i]);
kfree(queues);
@@ -649,7 +654,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
nomem:
while (--i >= 0)
- kfree(queues[i]);
+ kvfree(queues[i]);
kfree(queues);
@@ -664,7 +669,7 @@ static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
if (destroy)
while ((ptr = ptr_ring_consume(r)))
destroy(ptr);
- kfree(r->queue);
+ kvfree(r->queue);
}
#endif /* _LINUX_PTR_RING_H */
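
Since the queue array is now kvmalloc'ed, large rings no longer require physically contiguous memory; GFP_KERNEL remains the safe allocation flag. A usage sketch (the ring size is arbitrary):

    struct ptr_ring ring;

    if (ptr_ring_init(&ring, 65536, GFP_KERNEL))
            return -ENOMEM;
    /* ... produce/consume entries ... */
    ptr_ring_cleanup(&ring, kfree);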
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 23a9c89c7ad9..fc55ff31eca7 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -356,24 +356,9 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index,
int radix_tree_join(struct radix_tree_root *, unsigned long index,
unsigned new_order, void *);
-void __rcu **idr_get_free_cmn(struct radix_tree_root *root,
+void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max);
-static inline void __rcu **idr_get_free(struct radix_tree_root *root,
- struct radix_tree_iter *iter,
- gfp_t gfp,
- int end)
-{
- return idr_get_free_cmn(root, iter, gfp, end > 0 ? end - 1 : INT_MAX);
-}
-
-static inline void __rcu **idr_get_free_ext(struct radix_tree_root *root,
- struct radix_tree_iter *iter,
- gfp_t gfp,
- unsigned long end)
-{
- return idr_get_free_cmn(root, iter, gfp, end - 1);
-}
enum {
RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 4c310c34ddad..b32df49a3bd5 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -376,10 +376,10 @@ extern int of_setup_earlycon(const struct earlycon_id *match,
const char *options);
#ifdef CONFIG_SERIAL_EARLYCON
-extern bool earlycon_init_is_deferred __initdata;
+extern bool earlycon_acpi_spcr_enable __initdata;
int setup_earlycon(char *buf);
#else
-static const bool earlycon_init_is_deferred;
+static const bool earlycon_acpi_spcr_enable;
static inline int setup_earlycon(char *buf) { return 0; }
#endif
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index d96e74e114c0..592653becd91 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -229,6 +229,9 @@ void rpc_sleep_on_priority(struct rpc_wait_queue *,
struct rpc_task *,
rpc_action action,
int priority);
+void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
+ struct rpc_wait_queue *queue,
+ struct rpc_task *task);
void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
void rpc_wake_up(struct rpc_wait_queue *);
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 995c6fe9ee90..4b731b046bcd 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -185,8 +185,6 @@ extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
-extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
-extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index f5223bf2c420..062dc19b5840 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -213,11 +213,6 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
return nf_ct_delete(ct, 0, 0);
}
-/* These are for NAT. Icky. */
-extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- u32 seq);
-
/* Set all unconfirmed conntrack as dying */
void nf_ct_unconfirmed_destroy(struct net *);
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index b22b22082733..833752dd0c58 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -14,6 +14,7 @@ struct nf_flowtable_type {
struct list_head list;
int family;
void (*gc)(struct work_struct *work);
+ void (*free)(struct nf_flowtable *ft);
const struct rhashtable_params *params;
nf_hookfn *hook;
struct module *owner;
@@ -89,12 +90,15 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
void flow_offload_free(struct flow_offload *flow);
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
-void flow_offload_del(struct nf_flowtable *flow_table, struct flow_offload *flow);
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
int nf_flow_table_iterate(struct nf_flowtable *flow_table,
void (*iter)(struct flow_offload *flow, void *data),
void *data);
+
+void nf_flow_table_cleanup(struct net *net, struct net_device *dev);
+
+void nf_flow_table_free(struct nf_flowtable *flow_table);
void nf_flow_offload_work_gc(struct work_struct *work);
extern const struct rhashtable_params nf_flow_offload_rhash_params;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 58278669cc55..e3fc667f9ac2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1983,6 +1983,11 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
#define TCP_ULP_MAX 128
#define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
+enum {
+ TCP_ULP_TLS,
+ TCP_ULP_BPF,
+};
+
struct tcp_ulp_ops {
struct list_head list;
@@ -1991,12 +1996,15 @@ struct tcp_ulp_ops {
/* cleanup ulp */
void (*release)(struct sock *sk);
+ int uid;
char name[TCP_ULP_NAME_MAX];
+ bool user_visible;
struct module *owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
+int tcp_set_ulp_id(struct sock *sk, const int ulp);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index b6b3fb444a92..34a15d59ed88 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -53,6 +53,7 @@ struct target_backend_ops {
void (*free_prot)(struct se_device *);
struct configfs_attribute **tb_dev_attrib_attrs;
+ struct configfs_attribute **tb_dev_action_attrs;
};
struct sbc_ops {
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 2c8d8115469d..9f9f5902af38 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -183,6 +183,7 @@ enum tcm_sense_reason_table {
TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b),
TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c),
TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d),
+ TCM_LUN_BUSY = R(0x1e),
#undef R
};
@@ -808,6 +809,7 @@ struct se_device {
/* T10 SPC-2 + SPC-3 Reservations */
struct t10_reservation t10_pr;
struct se_dev_attrib dev_attrib;
+ struct config_group dev_action_group;
struct config_group dev_group;
struct config_group dev_pr_group;
struct se_dev_stat_grps dev_stat_grps;
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 8fb90a0819c3..0fb5ef939732 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1362,6 +1362,96 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_S390_CMMA_MIGRATION */
#define KVM_S390_GET_CMMA_BITS _IOWR(KVMIO, 0xb8, struct kvm_s390_cmma_log)
#define KVM_S390_SET_CMMA_BITS _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
+/* Memory Encryption Commands */
+#define KVM_MEMORY_ENCRYPT_OP _IOWR(KVMIO, 0xba, unsigned long)
+
+struct kvm_enc_region {
+ __u64 addr;
+ __u64 size;
+};
+
+#define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region)
+#define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region)
+
+/* Secure Encrypted Virtualization command */
+enum sev_cmd_id {
+ /* Guest initialization commands */
+ KVM_SEV_INIT = 0,
+ KVM_SEV_ES_INIT,
+ /* Guest launch commands */
+ KVM_SEV_LAUNCH_START,
+ KVM_SEV_LAUNCH_UPDATE_DATA,
+ KVM_SEV_LAUNCH_UPDATE_VMSA,
+ KVM_SEV_LAUNCH_SECRET,
+ KVM_SEV_LAUNCH_MEASURE,
+ KVM_SEV_LAUNCH_FINISH,
+ /* Guest migration commands (outgoing) */
+ KVM_SEV_SEND_START,
+ KVM_SEV_SEND_UPDATE_DATA,
+ KVM_SEV_SEND_UPDATE_VMSA,
+ KVM_SEV_SEND_FINISH,
+ /* Guest migration commands (incoming) */
+ KVM_SEV_RECEIVE_START,
+ KVM_SEV_RECEIVE_UPDATE_DATA,
+ KVM_SEV_RECEIVE_UPDATE_VMSA,
+ KVM_SEV_RECEIVE_FINISH,
+ /* Guest status and debug commands */
+ KVM_SEV_GUEST_STATUS,
+ KVM_SEV_DBG_DECRYPT,
+ KVM_SEV_DBG_ENCRYPT,
+ /* Guest certificates commands */
+ KVM_SEV_CERT_EXPORT,
+
+ KVM_SEV_NR_MAX,
+};
+
+struct kvm_sev_cmd {
+ __u32 id;
+ __u64 data;
+ __u32 error;
+ __u32 sev_fd;
+};
+
+struct kvm_sev_launch_start {
+ __u32 handle;
+ __u32 policy;
+ __u64 dh_uaddr;
+ __u32 dh_len;
+ __u64 session_uaddr;
+ __u32 session_len;
+};
+
+struct kvm_sev_launch_update_data {
+ __u64 uaddr;
+ __u32 len;
+};
+
+struct kvm_sev_launch_secret {
+ __u64 hdr_uaddr;
+ __u32 hdr_len;
+ __u64 guest_uaddr;
+ __u32 guest_len;
+ __u64 trans_uaddr;
+ __u32 trans_len;
+};
+
+struct kvm_sev_launch_measure {
+ __u64 uaddr;
+ __u32 len;
+};
+
+struct kvm_sev_guest_status {
+ __u32 handle;
+ __u32 policy;
+ __u32 state;
+};
+
+struct kvm_sev_dbg {
+ __u64 src_uaddr;
+ __u64 dst_uaddr;
+ __u32 len;
+};
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
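A rough userspace sketch of the new VM ioctls (vm_fd is assumed to be an
open KVM VM descriptor and sev_fd an open handle on the SEV device; error
handling is trimmed):

    struct kvm_sev_cmd cmd = {
            .id     = KVM_SEV_INIT,
            .sev_fd = (__u32)sev_fd,
    };

    if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) < 0)
            fprintf(stderr, "SEV_INIT failed, fw error %u\n", cmd.error);

    /* Register the guest RAM that may contain encrypted pages: */
    struct kvm_enc_region region = {
            .addr = (__u64)(unsigned long)guest_mem,
            .size = guest_mem_size,
    };
    ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region);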
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index 760e52a9640f..b3bcabe380da 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -88,6 +88,9 @@
(((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
#define PSCI_VERSION_MINOR(ver) \
((ver) & PSCI_VERSION_MINOR_MASK)
+#define PSCI_VERSION(maj, min) \
+ ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
+ ((min) & PSCI_VERSION_MINOR_MASK))
/* PSCI features decoding (>=1.0) */
#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1
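The new macro is simply the inverse of PSCI_VERSION_MAJOR() and
PSCI_VERSION_MINOR(); with PSCI_VERSION_MAJOR_SHIFT being 16:

    PSCI_VERSION(1, 0)      /* == 0x00010000 */
    PSCI_VERSION(0, 2)      /* == 0x00000002 */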
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
new file mode 100644
index 000000000000..3d77fe91239a
--- /dev/null
+++ b/include/uapi/linux/psp-sev.h
@@ -0,0 +1,142 @@
+/*
+ * Userspace interface for AMD Secure Encrypted Virtualization (SEV)
+ * platform management commands.
+ *
+ * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * SEV spec 0.14 is available at:
+ * http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PSP_SEV_USER_H__
+#define __PSP_SEV_USER_H__
+
+#include <linux/types.h>
+
+/**
+ * SEV platform commands
+ */
+enum {
+ SEV_FACTORY_RESET = 0,
+ SEV_PLATFORM_STATUS,
+ SEV_PEK_GEN,
+ SEV_PEK_CSR,
+ SEV_PDH_GEN,
+ SEV_PDH_CERT_EXPORT,
+ SEV_PEK_CERT_IMPORT,
+
+ SEV_MAX,
+};
+
+/**
+ * SEV Firmware status code
+ */
+typedef enum {
+ SEV_RET_SUCCESS = 0,
+ SEV_RET_INVALID_PLATFORM_STATE,
+ SEV_RET_INVALID_GUEST_STATE,
+	SEV_RET_INVALID_CONFIG,
+	SEV_RET_INVALID_LEN,
+ SEV_RET_ALREADY_OWNED,
+ SEV_RET_INVALID_CERTIFICATE,
+ SEV_RET_POLICY_FAILURE,
+ SEV_RET_INACTIVE,
+ SEV_RET_INVALID_ADDRESS,
+ SEV_RET_BAD_SIGNATURE,
+ SEV_RET_BAD_MEASUREMENT,
+ SEV_RET_ASID_OWNED,
+ SEV_RET_INVALID_ASID,
+ SEV_RET_WBINVD_REQUIRED,
+ SEV_RET_DFFLUSH_REQUIRED,
+ SEV_RET_INVALID_GUEST,
+ SEV_RET_INVALID_COMMAND,
+ SEV_RET_ACTIVE,
+ SEV_RET_HWSEV_RET_PLATFORM,
+ SEV_RET_HWSEV_RET_UNSAFE,
+ SEV_RET_UNSUPPORTED,
+ SEV_RET_MAX,
+} sev_ret_code;
+
+/**
+ * struct sev_user_data_status - PLATFORM_STATUS command parameters
+ *
+ * @api_major: major API version
+ * @api_minor: minor API version
+ * @state: platform state
+ * @flags: platform config flags
+ * @build: firmware build id for API version
+ * @guest_count: number of active guests
+ */
+struct sev_user_data_status {
+ __u8 api_major; /* Out */
+ __u8 api_minor; /* Out */
+ __u8 state; /* Out */
+ __u32 flags; /* Out */
+ __u8 build; /* Out */
+ __u32 guest_count; /* Out */
+} __packed;
+
+/**
+ * struct sev_user_data_pek_csr - PEK_CSR command parameters
+ *
+ * @address: address of the PEK certificate signing request
+ * @length: length of the certificate signing request buffer
+ */
+struct sev_user_data_pek_csr {
+ __u64 address; /* In */
+ __u32 length; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_user_data_pek_cert_import - PEK_CERT_IMPORT command parameters
+ *
+ * @pek_cert_address: PEK certificate chain
+ * @pek_cert_len: length of PEK certificate
+ * @oca_cert_address: OCA certificate chain
+ * @oca_cert_len: length of OCA certificate
+ */
+struct sev_user_data_pek_cert_import {
+ __u64 pek_cert_address; /* In */
+ __u32 pek_cert_len; /* In */
+ __u64 oca_cert_address; /* In */
+ __u32 oca_cert_len; /* In */
+} __packed;
+
+/**
+ * struct sev_user_data_pdh_cert_export - PDH_CERT_EXPORT command parameters
+ *
+ * @pdh_cert_address: PDH certificate address
+ * @pdh_cert_len: length of PDH certificate
+ * @cert_chain_address: PDH certificate chain
+ * @cert_chain_len: length of PDH certificate chain
+ */
+struct sev_user_data_pdh_cert_export {
+ __u64 pdh_cert_address; /* In */
+ __u32 pdh_cert_len; /* In/Out */
+ __u64 cert_chain_address; /* In */
+ __u32 cert_chain_len; /* In/Out */
+} __packed;
+
+/**
+ * struct sev_issue_cmd - SEV ioctl parameters
+ *
+ * @cmd: SEV commands to execute
+ * @data: pointer to the command structure
+ * @error: SEV FW return code on failure
+ */
+struct sev_issue_cmd {
+ __u32 cmd; /* In */
+ __u64 data; /* In */
+ __u32 error; /* Out */
+} __packed;
+
+#define SEV_IOC_TYPE 'S'
+#define SEV_ISSUE_CMD _IOWR(SEV_IOC_TYPE, 0x0, struct sev_issue_cmd)
+
+#endif /* __PSP_SEV_USER_H__ */
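A minimal userspace sketch of the ioctl flow (assuming the PSP driver
exposes the /dev/sev node this header describes):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/psp-sev.h>

    int main(void)
    {
            struct sev_user_data_status status = {};
            struct sev_issue_cmd arg = {
                    .cmd  = SEV_PLATFORM_STATUS,
                    .data = (__u64)(unsigned long)&status,
            };
            int fd = open("/dev/sev", O_RDWR);

            if (fd < 0 || ioctl(fd, SEV_ISSUE_CMD, &arg) < 0)
                    return 1;   /* on failure, arg.error holds the fw code */

            printf("SEV API %u.%u, %u active guests\n",
                   status.api_major, status.api_minor, status.guest_count);
            return 0;
    }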
diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h
index 343d7ddefe04..4e8b8304b793 100644
--- a/include/uapi/linux/virtio_balloon.h
+++ b/include/uapi/linux/virtio_balloon.h
@@ -52,7 +52,8 @@ struct virtio_balloon_config {
#define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */
#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
#define VIRTIO_BALLOON_S_AVAIL 6 /* Available memory as in /proc */
-#define VIRTIO_BALLOON_S_NR 7
+#define VIRTIO_BALLOON_S_CACHES 7 /* Disk caches */
+#define VIRTIO_BALLOON_S_NR 8
/*
* Memory statistics structure.
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 0314d1783d77..48c33417d13c 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -86,9 +86,10 @@ struct smap_psock {
struct work_struct tx_work;
struct work_struct gc_work;
+ struct proto *sk_proto;
+ void (*save_close)(struct sock *sk, long timeout);
void (*save_data_ready)(struct sock *sk);
void (*save_write_space)(struct sock *sk);
- void (*save_state_change)(struct sock *sk);
};
static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
@@ -96,12 +97,102 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
return rcu_dereference_sk_user_data(sk);
}
+static struct proto tcp_bpf_proto;
+static int bpf_tcp_init(struct sock *sk)
+{
+ struct smap_psock *psock;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ if (unlikely(psock->sk_proto)) {
+ rcu_read_unlock();
+ return -EBUSY;
+ }
+
+ psock->save_close = sk->sk_prot->close;
+ psock->sk_proto = sk->sk_prot;
+ sk->sk_prot = &tcp_bpf_proto;
+ rcu_read_unlock();
+ return 0;
+}
+
+static void bpf_tcp_release(struct sock *sk)
+{
+ struct smap_psock *psock;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+
+ if (likely(psock)) {
+ sk->sk_prot = psock->sk_proto;
+ psock->sk_proto = NULL;
+ }
+ rcu_read_unlock();
+}
+
+static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
+
+static void bpf_tcp_close(struct sock *sk, long timeout)
+{
+ void (*close_fun)(struct sock *sk, long timeout);
+ struct smap_psock_map_entry *e, *tmp;
+ struct smap_psock *psock;
+ struct sock *osk;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ return sk->sk_prot->close(sk, timeout);
+ }
+
+ /* The psock may be destroyed anytime after exiting the RCU critical
+ * section so by the time we use close_fun the psock may no longer
+ * be valid. However, bpf_tcp_close is called with the sock lock
+ * held so the close hook and sk are still valid.
+ */
+ close_fun = psock->save_close;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+ osk = cmpxchg(e->entry, sk, NULL);
+ if (osk == sk) {
+ list_del(&e->list);
+ smap_release_sock(psock, sk);
+ }
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+ rcu_read_unlock();
+ close_fun(sk, timeout);
+}
+
enum __sk_action {
__SK_DROP = 0,
__SK_PASS,
__SK_REDIRECT,
};
+static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
+ .name = "bpf_tcp",
+ .uid = TCP_ULP_BPF,
+ .user_visible = false,
+ .owner = NULL,
+ .init = bpf_tcp_init,
+ .release = bpf_tcp_release,
+};
+
+static int bpf_tcp_ulp_register(void)
+{
+ tcp_bpf_proto = tcp_prot;
+ tcp_bpf_proto.close = bpf_tcp_close;
+ return tcp_register_ulp(&bpf_tcp_ulp_ops);
+}
+
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
@@ -166,68 +257,6 @@ static void smap_report_sk_error(struct smap_psock *psock, int err)
sk->sk_error_report(sk);
}
-static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
-
-/* Called with lock_sock(sk) held */
-static void smap_state_change(struct sock *sk)
-{
- struct smap_psock_map_entry *e, *tmp;
- struct smap_psock *psock;
- struct socket_wq *wq;
- struct sock *osk;
-
- rcu_read_lock();
-
- /* Allowing transitions into an established syn_recv states allows
- * for early binding sockets to a smap object before the connection
- * is established.
- */
- switch (sk->sk_state) {
- case TCP_SYN_SENT:
- case TCP_SYN_RECV:
- case TCP_ESTABLISHED:
- break;
- case TCP_CLOSE_WAIT:
- case TCP_CLOSING:
- case TCP_LAST_ACK:
- case TCP_FIN_WAIT1:
- case TCP_FIN_WAIT2:
- case TCP_LISTEN:
- break;
- case TCP_CLOSE:
- /* Only release if the map entry is in fact the sock in
- * question. There is a case where the operator deletes
- * the sock from the map, but the TCP sock is closed before
- * the psock is detached. Use cmpxchg to verify correct
- * sock is removed.
- */
- psock = smap_psock_sk(sk);
- if (unlikely(!psock))
- break;
- write_lock_bh(&sk->sk_callback_lock);
- list_for_each_entry_safe(e, tmp, &psock->maps, list) {
- osk = cmpxchg(e->entry, sk, NULL);
- if (osk == sk) {
- list_del(&e->list);
- smap_release_sock(psock, sk);
- }
- }
- write_unlock_bh(&sk->sk_callback_lock);
- break;
- default:
- psock = smap_psock_sk(sk);
- if (unlikely(!psock))
- break;
- smap_report_sk_error(psock, EPIPE);
- break;
- }
-
- wq = rcu_dereference(sk->sk_wq);
- if (skwq_has_sleeper(wq))
- wake_up_interruptible_all(&wq->wait);
- rcu_read_unlock();
-}
-
static void smap_read_sock_strparser(struct strparser *strp,
struct sk_buff *skb)
{
@@ -322,10 +351,8 @@ static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
return;
sk->sk_data_ready = psock->save_data_ready;
sk->sk_write_space = psock->save_write_space;
- sk->sk_state_change = psock->save_state_change;
psock->save_data_ready = NULL;
psock->save_write_space = NULL;
- psock->save_state_change = NULL;
strp_stop(&psock->strp);
psock->strp_enabled = false;
}
@@ -350,6 +377,7 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
if (psock->refcnt)
return;
+ tcp_cleanup_ulp(sock);
smap_stop_sock(psock, sock);
clear_bit(SMAP_TX_RUNNING, &psock->state);
rcu_assign_sk_user_data(sock, NULL);
@@ -427,10 +455,8 @@ static void smap_start_sock(struct smap_psock *psock, struct sock *sk)
return;
psock->save_data_ready = sk->sk_data_ready;
psock->save_write_space = sk->sk_write_space;
- psock->save_state_change = sk->sk_state_change;
sk->sk_data_ready = smap_data_ready;
sk->sk_write_space = smap_write_space;
- sk->sk_state_change = smap_state_change;
psock->strp_enabled = true;
}
@@ -509,6 +535,10 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
if (attr->value_size > KMALLOC_MAX_SIZE)
return ERR_PTR(-E2BIG);
+ err = bpf_tcp_ulp_register();
+ if (err && err != -EEXIST)
+ return ERR_PTR(err);
+
stab = kzalloc(sizeof(*stab), GFP_USER);
if (!stab)
return ERR_PTR(-ENOMEM);
@@ -590,11 +620,6 @@ static void sock_map_free(struct bpf_map *map)
}
rcu_read_unlock();
- if (stab->bpf_verdict)
- bpf_prog_put(stab->bpf_verdict);
- if (stab->bpf_parse)
- bpf_prog_put(stab->bpf_parse);
-
sock_map_remove_complete(stab);
}
@@ -754,6 +779,10 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
goto out_progs;
}
+ err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
+ if (err)
+ goto out_progs;
+
set_bit(SMAP_TX_RUNNING, &psock->state);
}
@@ -866,6 +895,19 @@ static int sock_map_update_elem(struct bpf_map *map,
return err;
}
+static void sock_map_release(struct bpf_map *map, struct file *map_file)
+{
+ struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+ struct bpf_prog *orig;
+
+ orig = xchg(&stab->bpf_parse, NULL);
+ if (orig)
+ bpf_prog_put(orig);
+ orig = xchg(&stab->bpf_verdict, NULL);
+ if (orig)
+ bpf_prog_put(orig);
+}
+
const struct bpf_map_ops sock_map_ops = {
.map_alloc = sock_map_alloc,
.map_free = sock_map_free,
@@ -873,6 +915,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_get_next_key = sock_map_get_next_key,
.map_update_elem = sock_map_update_elem,
.map_delete_elem = sock_map_delete_elem,
+ .map_release = sock_map_release,
};
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
diff --git a/kernel/configs/kvm_guest.config b/kernel/configs/kvm_guest.config
index 8d9643767142..108fecc20fc1 100644
--- a/kernel/configs/kvm_guest.config
+++ b/kernel/configs/kvm_guest.config
@@ -18,6 +18,7 @@ CONFIG_VIRTUALIZATION=y
CONFIG_HYPERVISOR_GUEST=y
CONFIG_PARAVIRT=y
CONFIG_KVM_GUEST=y
+CONFIG_S390_GUEST=y
CONFIG_VIRTIO=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BLK=y
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index dabd9d167d42..eac9ce2c57a2 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4456,7 +4456,6 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
func_g.type = filter_parse_regex(glob, strlen(glob),
&func_g.search, &not);
func_g.len = strlen(func_g.search);
- func_g.search = glob;
/* we do not support '!' for function probes */
if (WARN_ON(not))
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 61e7f0678d33..a764aec3c9a1 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -400,7 +400,6 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
for (i = 0; i < len; i++) {
if (buff[i] == '*') {
if (!i) {
- *search = buff + 1;
type = MATCH_END_ONLY;
} else if (i == len - 1) {
if (type == MATCH_END_ONLY)
@@ -410,14 +409,14 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
buff[i] = 0;
break;
} else { /* pattern continues, use full glob */
- type = MATCH_GLOB;
- break;
+ return MATCH_GLOB;
}
} else if (strchr("[?\\", buff[i])) {
- type = MATCH_GLOB;
- break;
+ return MATCH_GLOB;
}
}
+ if (buff[0] == '*')
+ *search = buff + 1;
return type;
}
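For quick reference, the resulting mapping from pattern shape to match type
(derived from the loop above; type starts out as MATCH_FULL) is:

    /*
     *  "foo"     MATCH_FULL          (no metacharacters)
     *  "foo*"    MATCH_FRONT_ONLY    (trailing '*' stripped)
     *  "*foo"    MATCH_END_ONLY      (*search now skips the leading '*')
     *  "*foo*"   MATCH_MIDDLE_ONLY
     *  "f*o"     MATCH_GLOB          ('*' in mid-pattern)
     *  "f[o]?"   MATCH_GLOB          (any of '[', '?', '\')
     */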
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b66c264d4194..6088408ef26c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -351,7 +351,6 @@ config SECTION_MISMATCH_WARN_ONLY
#
config ARCH_WANT_FRAME_POINTERS
bool
- help
config FRAME_POINTER
bool "Compile the kernel with frame pointers"
diff --git a/lib/idr.c b/lib/idr.c
index 2593ce513a18..c98d77fcf393 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -1,4 +1,5 @@
#include <linux/bitmap.h>
+#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
@@ -7,71 +8,184 @@
DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
static DEFINE_SPINLOCK(simple_ida_lock);
-int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index,
- unsigned long start, unsigned long end, gfp_t gfp,
- bool ext)
+/**
+ * idr_alloc_u32() - Allocate an ID.
+ * @idr: IDR handle.
+ * @ptr: Pointer to be associated with the new ID.
+ * @nextid: Pointer to an ID.
+ * @max: The maximum ID to allocate (inclusive).
+ * @gfp: Memory allocation flags.
+ *
+ * Allocates an unused ID in the range specified by @nextid and @max.
+ * Note that @max is inclusive whereas the @end parameter to idr_alloc()
+ * is exclusive. The new ID is assigned to @nextid before the pointer
+ * is inserted into the IDR, so if @nextid points into the object pointed
+ * to by @ptr, a concurrent lookup will not find an uninitialised ID.
+ *
+ * The caller should provide their own locking to ensure that two
+ * concurrent modifications to the IDR are not possible. Read-only
+ * accesses to the IDR may be done under the RCU read lock or may
+ * exclude simultaneous writers.
+ *
+ * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
+ * or -ENOSPC if no free IDs could be found. If an error occurred,
+ * @nextid is unchanged.
+ */
+int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
+ unsigned long max, gfp_t gfp)
{
struct radix_tree_iter iter;
void __rcu **slot;
+ int base = idr->idr_base;
+ int id = *nextid;
if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
return -EINVAL;
+ if (WARN_ON_ONCE(!(idr->idr_rt.gfp_mask & ROOT_IS_IDR)))
+ idr->idr_rt.gfp_mask |= IDR_RT_MARKER;
- radix_tree_iter_init(&iter, start);
- if (ext)
- slot = idr_get_free_ext(&idr->idr_rt, &iter, gfp, end);
- else
- slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
+ id = (id < base) ? 0 : id - base;
+ radix_tree_iter_init(&iter, id);
+ slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
if (IS_ERR(slot))
return PTR_ERR(slot);
+ *nextid = iter.index + base;
+ /* there is a memory barrier inside radix_tree_iter_replace() */
radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
- if (index)
- *index = iter.index;
return 0;
}
-EXPORT_SYMBOL_GPL(idr_alloc_cmn);
+EXPORT_SYMBOL_GPL(idr_alloc_u32);
/**
- * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
- * @idr: idr handle
- * @ptr: pointer to be associated with the new id
- * @start: the minimum id (inclusive)
- * @end: the maximum id (exclusive)
- * @gfp: memory allocation flags
- *
- * Allocates an ID larger than the last ID allocated if one is available.
- * If not, it will attempt to allocate the smallest ID that is larger or
- * equal to @start.
+ * idr_alloc() - Allocate an ID.
+ * @idr: IDR handle.
+ * @ptr: Pointer to be associated with the new ID.
+ * @start: The minimum ID (inclusive).
+ * @end: The maximum ID (exclusive).
+ * @gfp: Memory allocation flags.
+ *
+ * Allocates an unused ID in the range specified by @start and @end. If
+ * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
+ * callers to use @start + N as @end as long as N is within integer range.
+ *
+ * The caller should provide their own locking to ensure that two
+ * concurrent modifications to the IDR are not possible. Read-only
+ * accesses to the IDR may be done under the RCU read lock or may
+ * exclude simultaneous writers.
+ *
+ * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
+ * or -ENOSPC if no free IDs could be found.
*/
-int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
+int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
- int id, curr = idr->idr_next;
+ u32 id = start;
+ int ret;
+
+ if (WARN_ON_ONCE(start < 0))
+ return -EINVAL;
+
+ ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
+ if (ret)
+ return ret;
+
+ return id;
+}
+EXPORT_SYMBOL_GPL(idr_alloc);
- if (curr < start)
- curr = start;
+/**
+ * idr_alloc_cyclic() - Allocate an ID cyclically.
+ * @idr: IDR handle.
+ * @ptr: Pointer to be associated with the new ID.
+ * @start: The minimum ID (inclusive).
+ * @end: The maximum ID (exclusive).
+ * @gfp: Memory allocation flags.
+ *
+ * Allocates an unused ID in the range specified by @start and @end. If
+ * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
+ * callers to use @start + N as @end as long as N is within integer range.
+ * The search for an unused ID will start at the last ID allocated and will
+ * wrap around to @start if no free IDs are found before reaching @end.
+ *
+ * The caller should provide their own locking to ensure that two
+ * concurrent modifications to the IDR are not possible. Read-only
+ * accesses to the IDR may be done under the RCU read lock or may
+ * exclude simultaneous writers.
+ *
+ * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
+ * or -ENOSPC if no free IDs could be found.
+ */
+int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
+{
+ u32 id = idr->idr_next;
+ int err, max = end > 0 ? end - 1 : INT_MAX;
- id = idr_alloc(idr, ptr, curr, end, gfp);
- if ((id == -ENOSPC) && (curr > start))
- id = idr_alloc(idr, ptr, start, curr, gfp);
+ if ((int)id < start)
+ id = start;
- if (id >= 0)
- idr->idr_next = id + 1U;
+ err = idr_alloc_u32(idr, ptr, &id, max, gfp);
+ if ((err == -ENOSPC) && (id > start)) {
+ id = start;
+ err = idr_alloc_u32(idr, ptr, &id, max, gfp);
+ }
+ if (err)
+ return err;
+ idr->idr_next = id + 1;
return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
/**
- * idr_for_each - iterate through all stored pointers
- * @idr: idr handle
- * @fn: function to be called for each pointer
- * @data: data passed to callback function
+ * idr_remove() - Remove an ID from the IDR.
+ * @idr: IDR handle.
+ * @id: Pointer ID.
+ *
+ * Removes this ID from the IDR. If the ID was not previously in the IDR,
+ * this function returns %NULL.
+ *
+ * Since this function modifies the IDR, the caller should provide their
+ * own locking to ensure that concurrent modification of the same IDR is
+ * not possible.
+ *
+ * Return: The pointer formerly associated with this ID.
+ */
+void *idr_remove(struct idr *idr, unsigned long id)
+{
+ return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
+}
+EXPORT_SYMBOL_GPL(idr_remove);
+
+/**
+ * idr_find() - Return pointer for given ID.
+ * @idr: IDR handle.
+ * @id: Pointer ID.
+ *
+ * Looks up the pointer associated with this ID. A %NULL pointer may
+ * indicate that @id is not allocated or that the %NULL pointer was
+ * associated with this ID.
+ *
+ * This function can be called under rcu_read_lock(), provided that the
+ * lifetimes of the leaf pointers are correctly managed.
+ *
+ * Return: The pointer associated with this ID.
+ */
+void *idr_find(const struct idr *idr, unsigned long id)
+{
+ return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
+}
+EXPORT_SYMBOL_GPL(idr_find);
+
+/**
+ * idr_for_each() - Iterate through all stored pointers.
+ * @idr: IDR handle.
+ * @fn: Function to be called for each pointer.
+ * @data: Data passed to callback function.
*
* The callback function will be called for each entry in @idr, passing
- * the id, the pointer and the data pointer passed to this function.
+ * the ID, the entry and @data.
*
* If @fn returns anything other than %0, the iteration stops and that
* value is returned from this function.
@@ -86,9 +200,14 @@ int idr_for_each(const struct idr *idr,
{
struct radix_tree_iter iter;
void __rcu **slot;
+ int base = idr->idr_base;
radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
- int ret = fn(iter.index, rcu_dereference_raw(*slot), data);
+ int ret;
+
+ if (WARN_ON_ONCE(iter.index > INT_MAX))
+ break;
+ ret = fn(iter.index + base, rcu_dereference_raw(*slot), data);
if (ret)
return ret;
}
@@ -98,9 +217,9 @@ int idr_for_each(const struct idr *idr,
EXPORT_SYMBOL(idr_for_each);
/**
- * idr_get_next - Find next populated entry
- * @idr: idr handle
- * @nextid: Pointer to lowest possible ID to return
+ * idr_get_next() - Find next populated entry.
+ * @idr: IDR handle.
+ * @nextid: Pointer to an ID.
*
* Returns the next populated entry in the tree with an ID greater than
* or equal to the value pointed to by @nextid. On exit, @nextid is updated
@@ -111,35 +230,55 @@ void *idr_get_next(struct idr *idr, int *nextid)
{
struct radix_tree_iter iter;
void __rcu **slot;
+ int base = idr->idr_base;
+ int id = *nextid;
- slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
+ id = (id < base) ? 0 : id - base;
+ slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
if (!slot)
return NULL;
+ if (WARN_ON_ONCE(iter.index + base > INT_MAX))
+ return NULL;
+
+ id = iter.index + base;
- *nextid = iter.index;
+ *nextid = id;
return rcu_dereference_raw(*slot);
}
EXPORT_SYMBOL(idr_get_next);
-void *idr_get_next_ext(struct idr *idr, unsigned long *nextid)
+/**
+ * idr_get_next_ul() - Find next populated entry.
+ * @idr: IDR handle.
+ * @nextid: Pointer to an ID.
+ *
+ * Returns the next populated entry in the tree with an ID greater than
+ * or equal to the value pointed to by @nextid. On exit, @nextid is updated
+ * to the ID of the found value. To use in a loop, the value pointed to by
+ * nextid must be incremented by the user.
+ */
+void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
struct radix_tree_iter iter;
void __rcu **slot;
+ unsigned long base = idr->idr_base;
+ unsigned long id = *nextid;
- slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
+ id = (id < base) ? 0 : id - base;
+ slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
if (!slot)
return NULL;
- *nextid = iter.index;
+ *nextid = iter.index + base;
return rcu_dereference_raw(*slot);
}
-EXPORT_SYMBOL(idr_get_next_ext);
+EXPORT_SYMBOL(idr_get_next_ul);
/**
- * idr_replace - replace pointer for given id
- * @idr: idr handle
- * @ptr: New pointer to associate with the ID
- * @id: Lookup key
+ * idr_replace() - replace pointer for given ID.
+ * @idr: IDR handle.
+ * @ptr: New pointer to associate with the ID.
+ * @id: ID to change.
*
* Replace the pointer registered with an ID and return the old value.
* This function can be called under the RCU read lock concurrently with
@@ -147,18 +286,9 @@ EXPORT_SYMBOL(idr_get_next_ext);
* the one being replaced!).
*
* Returns: the old value on success. %-ENOENT indicates that @id was not
- * found. %-EINVAL indicates that @id or @ptr were not valid.
+ * found. %-EINVAL indicates that @ptr was not valid.
*/
-void *idr_replace(struct idr *idr, void *ptr, int id)
-{
- if (id < 0)
- return ERR_PTR(-EINVAL);
-
- return idr_replace_ext(idr, ptr, id);
-}
-EXPORT_SYMBOL(idr_replace);
-
-void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id)
+void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
struct radix_tree_node *node;
void __rcu **slot = NULL;
@@ -166,6 +296,7 @@ void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id)
if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
return ERR_PTR(-EINVAL);
+ id -= idr->idr_base;
entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
@@ -175,7 +306,7 @@ void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id)
return entry;
}
-EXPORT_SYMBOL(idr_replace_ext);
+EXPORT_SYMBOL(idr_replace);
/**
* DOC: IDA description
@@ -235,7 +366,7 @@ EXPORT_SYMBOL(idr_replace_ext);
* bitmap, which is excessive.
*/
-#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS)
+#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1)
/**
* ida_get_new_above - allocate new ID above or equal to a start id
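Taken together, the reworked allocator is used like this (a sketch; my_idr,
my_lock and struct my_obj are illustrative):

    static DEFINE_IDR(my_idr);
    static DEFINE_SPINLOCK(my_lock);

    int my_obj_register(struct my_obj *obj)
    {
            int id;

            idr_preload(GFP_KERNEL);
            spin_lock(&my_lock);
            /* end == 0 means "up to and including INT_MAX" */
            id = idr_alloc(&my_idr, obj, 1, 0, GFP_NOWAIT);
            spin_unlock(&my_lock);
            idr_preload_end();

            if (id < 0)
                    return id;
            obj->id = id;
            return 0;
    }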
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index c8d55565fafa..0a7ae3288a24 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -24,6 +24,7 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
@@ -2135,7 +2136,7 @@ int ida_pre_get(struct ida *ida, gfp_t gfp)
}
EXPORT_SYMBOL(ida_pre_get);
-void __rcu **idr_get_free_cmn(struct radix_tree_root *root,
+void __rcu **idr_get_free(struct radix_tree_root *root,
struct radix_tree_iter *iter, gfp_t gfp,
unsigned long max)
{
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 4cd9ea9b3449..b4e22345963f 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -83,6 +83,7 @@ struct bpf_test {
__u32 result;
} test[MAX_SUBTESTS];
int (*fill_helper)(struct bpf_test *self);
+ int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
__u8 frag_data[MAX_DATA];
int stack_depth; /* for eBPF only, since tests don't call verifier */
};
@@ -2026,7 +2027,9 @@ static struct bpf_test tests[] = {
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
- { }
+ { },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
},
{
"check: div_k_0",
@@ -2036,7 +2039,9 @@ static struct bpf_test tests[] = {
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
- { }
+ { },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
},
{
"check: unknown insn",
@@ -2047,7 +2052,9 @@ static struct bpf_test tests[] = {
},
CLASSIC | FLAG_EXPECTED_FAIL,
{ },
- { }
+ { },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
},
{
"check: out of range spill/fill",
@@ -2057,7 +2064,9 @@ static struct bpf_test tests[] = {
},
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
- { }
+ { },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
},
{
"JUMPS + HOLES",
@@ -2149,6 +2158,8 @@ static struct bpf_test tests[] = {
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
},
{
"check: LDX + RET X",
@@ -2159,6 +2170,8 @@ static struct bpf_test tests[] = {
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
},
{ /* Mainly checking JIT here. */
"M[]: alt STX + LDX",
@@ -2333,6 +2346,8 @@ static struct bpf_test tests[] = {
CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
{ },
{ },
+ .fill_helper = NULL,
+ .expected_errcode = -EINVAL,
},
{ /* Passes checker but fails during runtime. */
"LD [SKF_AD_OFF-1]",
@@ -5395,6 +5410,7 @@ static struct bpf_test tests[] = {
{ },
{ },
.fill_helper = bpf_fill_maxinsns4,
+ .expected_errcode = -EINVAL,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Very long jump",
@@ -5450,10 +5466,15 @@ static struct bpf_test tests[] = {
{
"BPF_MAXINSNS: Jump, gap, jump, ...",
{ },
+#ifdef CONFIG_BPF_JIT_ALWAYS_ON
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+#else
CLASSIC | FLAG_NO_DATA,
+#endif
{ },
{ { 0, 0xababcbac } },
.fill_helper = bpf_fill_maxinsns11,
+ .expected_errcode = -ENOTSUPP,
},
{
"BPF_MAXINSNS: ld_abs+get_processor_id",
@@ -6344,7 +6365,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
*err = bpf_prog_create(&fp, &fprog);
if (tests[which].aux & FLAG_EXPECTED_FAIL) {
- if (*err == -EINVAL) {
+ if (*err == tests[which].expected_errcode) {
pr_cont("PASS\n");
/* Verifier rejected filter as expected. */
*err = 0;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 5c036d2f401e..1e492ef2a33d 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -421,6 +421,10 @@ ceph_parse_options(char *options, const char *dev_name,
opt->name = kstrndup(argstr[0].from,
argstr[0].to-argstr[0].from,
GFP_KERNEL);
+ if (!opt->name) {
+ err = -ENOMEM;
+ goto out;
+ }
break;
case Opt_secret:
opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 56af8e41abfc..bc290413a49d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1951,6 +1951,38 @@ static struct net *rtnl_link_get_net_capable(const struct sk_buff *skb,
return net;
}
+/* Reject requests that carry more than one attribute identifying a network
+ * namespace, since the attributes could refer to different namespaces.
+ */
+static int rtnl_ensure_unique_netns(struct nlattr *tb[],
+ struct netlink_ext_ack *extack,
+ bool netns_id_only)
+{
+ if (netns_id_only) {
+ if (!tb[IFLA_NET_NS_PID] && !tb[IFLA_NET_NS_FD])
+ return 0;
+
+ NL_SET_ERR_MSG(extack, "specified netns attribute not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (tb[IFLA_IF_NETNSID] && (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]))
+ goto invalid_attr;
+
+ if (tb[IFLA_NET_NS_PID] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_FD]))
+ goto invalid_attr;
+
+ if (tb[IFLA_NET_NS_FD] && (tb[IFLA_IF_NETNSID] || tb[IFLA_NET_NS_PID]))
+ goto invalid_attr;
+
+ return 0;
+
+invalid_attr:
+ NL_SET_ERR_MSG(extack, "multiple netns identifying attributes specified");
+ return -EINVAL;
+}
+
static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
if (dev) {
@@ -2553,6 +2585,10 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0)
goto errout;
+ err = rtnl_ensure_unique_netns(tb, extack, false);
+ if (err < 0)
+ goto errout;
+
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
else
@@ -2649,6 +2685,10 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0)
return err;
+ err = rtnl_ensure_unique_netns(tb, extack, true);
+ if (err < 0)
+ return err;
+
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
@@ -2802,6 +2842,10 @@ replay:
if (err < 0)
return err;
+ err = rtnl_ensure_unique_netns(tb, extack, false);
+ if (err < 0)
+ return err;
+
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
else
@@ -3045,6 +3089,10 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err < 0)
return err;
+ err = rtnl_ensure_unique_netns(tb, extack, true);
+ if (err < 0)
+ return err;
+
if (tb[IFLA_IF_NETNSID]) {
netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8c61c27c1b28..09bd89c90a71 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3894,10 +3894,12 @@ EXPORT_SYMBOL_GPL(skb_gro_receive);
void __init skb_init(void)
{
- skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
+ skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
sizeof(struct sk_buff),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ offsetof(struct sk_buff, cb),
+ sizeof_field(struct sk_buff, cb),
NULL);
skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
sizeof(struct sk_buff_fclones),
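kmem_cache_create_usercopy() takes the kmem_cache_create() arguments plus a
usercopy window (offset, size); here only the cb[] scratch area of struct
sk_buff is whitelisted for copies to and from userspace. The general shape,
with a hypothetical struct my_obj:

    cache = kmem_cache_create_usercopy("my_cache",
                                       sizeof(struct my_obj), 0,
                                       SLAB_HWCACHE_ALIGN,
                                       offsetof(struct my_obj, user_part),
                                       sizeof_field(struct my_obj, user_part),
                                       NULL);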
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
index af781010753b..49da67034f29 100644
--- a/net/dns_resolver/dns_query.c
+++ b/net/dns_resolver/dns_query.c
@@ -52,11 +52,11 @@
* @name: Name to look up
* @namelen: Length of name
* @options: Request options (or NULL if no options)
- * @_result: Where to place the returned data.
+ * @_result: Where to place the returned data (or NULL)
* @_expiry: Where to store the result expiry time (or NULL)
*
- * The data will be returned in the pointer at *result, and the caller is
- * responsible for freeing it.
+ * The data will be returned in the pointer at *_result, if provided, and
+ * the caller is responsible for freeing it.
*
* The description should be of the form "[<query_type>:]<domain_name>", and
* the options need to be appropriate for the query type requested. If no
@@ -81,7 +81,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
kenter("%s,%*.*s,%zu,%s",
type, (int)namelen, (int)namelen, name, namelen, options);
- if (!name || namelen == 0 || !_result)
+ if (!name || namelen == 0)
return -EINVAL;
/* construct the query key description as "[<type>:]<name>" */
@@ -146,13 +146,15 @@ int dns_query(const char *type, const char *name, size_t namelen,
upayload = user_key_payload_locked(rkey);
len = upayload->datalen;
- ret = -ENOMEM;
- *_result = kmalloc(len + 1, GFP_KERNEL);
- if (!*_result)
- goto put;
+ if (_result) {
+ ret = -ENOMEM;
+ *_result = kmalloc(len + 1, GFP_KERNEL);
+ if (!*_result)
+ goto put;
- memcpy(*_result, upayload->data, len);
- (*_result)[len] = '\0';
+ memcpy(*_result, upayload->data, len);
+ (*_result)[len] = '\0';
+ }
if (_expiry)
*_expiry = rkey->expiry;
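With _result now optional, a caller that only wants to check that a record
resolves (or fetch its expiry) can pass NULL (sketch; "cell" is an
illustrative query name):

    time64_t expiry;
    int ret;

    /* No payload copy: just confirm the record and read its expiry. */
    ret = dns_query("afsdb", cell, strlen(cell), NULL, NULL, &expiry);
    if (ret < 0)
            return ret;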
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 5f52236780b4..dfe6fa4ea554 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -80,8 +80,7 @@ endif # NF_TABLES
config NF_FLOW_TABLE_IPV4
tristate "Netfilter flow table IPv4 module"
- depends on NF_CONNTRACK && NF_TABLES
- select NF_FLOW_TABLE
+ depends on NF_FLOW_TABLE
help
This option adds the flow table IPv4 support.
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
index b2d01eb25f2c..25d2975da156 100644
--- a/net/ipv4/netfilter/nf_flow_table_ipv4.c
+++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
@@ -260,6 +260,7 @@ static struct nf_flowtable_type flowtable_ipv4 = {
.family = NFPROTO_IPV4,
.params = &nf_flow_offload_rhash_params,
.gc = nf_flow_offload_work_gc,
+ .free = nf_flow_table_free,
.hook = nf_flow_offload_ip_hook,
.owner = THIS_MODULE,
};
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 95738aa0d8a6..f8ad397e285e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -705,7 +705,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
*/
if (sk) {
arg.bound_dev_if = sk->sk_bound_dev_if;
- trace_tcp_send_reset(sk, skb);
+ if (sk_fullsock(sk))
+ trace_tcp_send_reset(sk, skb);
}
BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
index 6bb9e14c710a..622caa4039e0 100644
--- a/net/ipv4/tcp_ulp.c
+++ b/net/ipv4/tcp_ulp.c
@@ -29,6 +29,18 @@ static struct tcp_ulp_ops *tcp_ulp_find(const char *name)
return NULL;
}
+static struct tcp_ulp_ops *tcp_ulp_find_id(const int ulp)
+{
+ struct tcp_ulp_ops *e;
+
+ list_for_each_entry_rcu(e, &tcp_ulp_list, list) {
+ if (e->uid == ulp)
+ return e;
+ }
+
+ return NULL;
+}
+
static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
{
const struct tcp_ulp_ops *ulp = NULL;
@@ -51,6 +63,18 @@ static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
return ulp;
}
+static const struct tcp_ulp_ops *__tcp_ulp_lookup(const int uid)
+{
+ const struct tcp_ulp_ops *ulp;
+
+ rcu_read_lock();
+ ulp = tcp_ulp_find_id(uid);
+ if (!ulp || !try_module_get(ulp->owner))
+ ulp = NULL;
+ rcu_read_unlock();
+ return ulp;
+}
+
/* Attach new upper layer protocol to the list
* of available protocols.
*/
@@ -59,13 +83,10 @@ int tcp_register_ulp(struct tcp_ulp_ops *ulp)
int ret = 0;
spin_lock(&tcp_ulp_list_lock);
- if (tcp_ulp_find(ulp->name)) {
- pr_notice("%s already registered or non-unique name\n",
- ulp->name);
+ if (tcp_ulp_find(ulp->name))
ret = -EEXIST;
- } else {
+ else
list_add_tail_rcu(&ulp->list, &tcp_ulp_list);
- }
spin_unlock(&tcp_ulp_list_lock);
return ret;
@@ -124,6 +145,34 @@ int tcp_set_ulp(struct sock *sk, const char *name)
if (!ulp_ops)
return -ENOENT;
+ if (!ulp_ops->user_visible) {
+ module_put(ulp_ops->owner);
+ return -ENOENT;
+ }
+
+ err = ulp_ops->init(sk);
+ if (err) {
+ module_put(ulp_ops->owner);
+ return err;
+ }
+
+ icsk->icsk_ulp_ops = ulp_ops;
+ return 0;
+}
+
+int tcp_set_ulp_id(struct sock *sk, int ulp)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ const struct tcp_ulp_ops *ulp_ops;
+ int err;
+
+ if (icsk->icsk_ulp_ops)
+ return -EEXIST;
+
+ ulp_ops = __tcp_ulp_lookup(ulp);
+ if (!ulp_ops)
+ return -ENOENT;
+
err = ulp_ops->init(sk);
if (err) {
module_put(ulp_ops->owner);
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 4a634b7a2c80..d395d1590699 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -73,8 +73,7 @@ endif # NF_TABLES
config NF_FLOW_TABLE_IPV6
tristate "Netfilter flow table IPv6 module"
- depends on NF_CONNTRACK && NF_TABLES
- select NF_FLOW_TABLE
+ depends on NF_FLOW_TABLE
help
This option adds the flow table IPv6 support.
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index ce53dcfda88a..b84ce3e6d728 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -264,6 +264,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
* this case. -DaveM
*/
pr_debug("end of fragment not rounded to 8 bytes.\n");
+ inet_frag_kill(&fq->q, &nf_frags);
return -EPROTO;
}
if (end > fq->q.len) {
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
index fff21602875a..d346705d6ee6 100644
--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
@@ -253,6 +253,7 @@ static struct nf_flowtable_type flowtable_ipv6 = {
.family = NFPROTO_IPV6,
.params = &nf_flow_offload_rhash_params,
.gc = nf_flow_offload_work_gc,
+ .free = nf_flow_table_free,
.hook = nf_flow_offload_ipv6_hook,
.owner = THIS_MODULE,
};
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fb2d251c0500..9dcfadddd800 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2479,7 +2479,7 @@ static int ip6_route_check_nh_onlink(struct net *net,
struct net_device *dev,
struct netlink_ext_ack *extack)
{
- u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
+ u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
const struct in6_addr *gw_addr = &cfg->fc_gateway;
u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
struct rt6_info *grt;
@@ -2488,8 +2488,10 @@ static int ip6_route_check_nh_onlink(struct net *net,
err = 0;
grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
if (grt) {
- if (grt->rt6i_flags & flags || dev != grt->dst.dev) {
- NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
+ if (!grt->dst.error &&
+ (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
+ NL_SET_ERR_MSG(extack,
+ "Nexthop has invalid gateway or device mismatch");
err = -EINVAL;
}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index a1ab29e2ab3b..412139f4eccd 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -942,7 +942,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
if (sk) {
oif = sk->sk_bound_dev_if;
- trace_tcp_send_reset(sk, skb);
+ if (sk_fullsock(sk))
+ trace_tcp_send_reset(sk, skb);
}
tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 5dce8336d33f..e545a3c9365f 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -8,6 +8,7 @@
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/netconf.h>
+#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/percpu.h>
#include <net/ip.h>
@@ -935,24 +936,27 @@ errout:
return err;
}
-static bool mpls_label_ok(struct net *net, unsigned int index,
+static bool mpls_label_ok(struct net *net, unsigned int *index,
struct netlink_ext_ack *extack)
{
+ bool is_ok = true;
+
/* Reserved labels may not be set */
- if (index < MPLS_LABEL_FIRST_UNRESERVED) {
+ if (*index < MPLS_LABEL_FIRST_UNRESERVED) {
NL_SET_ERR_MSG(extack,
"Invalid label - must be MPLS_LABEL_FIRST_UNRESERVED or higher");
- return false;
+ is_ok = false;
}
/* The full 20 bit range may not be supported. */
- if (index >= net->mpls.platform_labels) {
+ if (is_ok && *index >= net->mpls.platform_labels) {
NL_SET_ERR_MSG(extack,
"Label >= configured maximum in platform_labels");
- return false;
+ is_ok = false;
}
- return true;
+ *index = array_index_nospec(*index, net->mpls.platform_labels);
+ return is_ok;
}
static int mpls_route_add(struct mpls_route_config *cfg,
@@ -975,7 +979,7 @@ static int mpls_route_add(struct mpls_route_config *cfg,
index = find_free_label(net);
}
- if (!mpls_label_ok(net, index, extack))
+ if (!mpls_label_ok(net, &index, extack))
goto errout;
/* Append makes no sense with mpls */
@@ -1052,7 +1056,7 @@ static int mpls_route_del(struct mpls_route_config *cfg,
index = cfg->rc_label;
- if (!mpls_label_ok(net, index, extack))
+ if (!mpls_label_ok(net, &index, extack))
goto errout;
mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
@@ -1810,7 +1814,7 @@ static int rtm_to_route_config(struct sk_buff *skb,
goto errout;
if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
- cfg->rc_label, extack))
+ &cfg->rc_label, extack))
goto errout;
break;
}
@@ -2137,7 +2141,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
goto errout;
}
- if (!mpls_label_ok(net, in_label, extack)) {
+ if (!mpls_label_ok(net, &in_label, extack)) {
err = -EINVAL;
goto errout;
}
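The switch to passing &index follows the usual Spectre-v1 hardening
pattern: bounds-check an untrusted index, then clamp it with
array_index_nospec() before it can be used to index a table (generic
sketch, not MPLS-specific):

    if (idx >= size)
            return -EINVAL;
    idx = array_index_nospec(idx, size);
    item = table[idx];      /* safe even under misspeculation */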
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 9019fa98003d..d3220b43c832 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -666,8 +666,8 @@ endif # NF_TABLES
config NF_FLOW_TABLE_INET
tristate "Netfilter flow table mixed IPv4/IPv6 module"
- depends on NF_FLOW_TABLE_IPV4 && NF_FLOW_TABLE_IPV6
- select NF_FLOW_TABLE
+ depends on NF_FLOW_TABLE_IPV4
+ depends on NF_FLOW_TABLE_IPV6
help
This option adds the flow table mixed IPv4/IPv6 support.
@@ -675,7 +675,9 @@ config NF_FLOW_TABLE_INET
config NF_FLOW_TABLE
tristate "Netfilter flow table module"
- depends on NF_CONNTRACK && NF_TABLES
+ depends on NETFILTER_INGRESS
+ depends on NF_CONNTRACK
+ depends on NF_TABLES
help
This option adds the flow table core infrastructure.
diff --git a/net/netfilter/nf_flow_table.c b/net/netfilter/nf_flow_table.c
index 2f5099cb85b8..ec410cae9307 100644
--- a/net/netfilter/nf_flow_table.c
+++ b/net/netfilter/nf_flow_table.c
@@ -4,6 +4,7 @@
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
+#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
@@ -124,7 +125,9 @@ void flow_offload_free(struct flow_offload *flow)
dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
e = container_of(flow, struct flow_offload_entry, flow);
- kfree(e);
+ nf_ct_delete(e->ct, 0, 0);
+ nf_ct_put(e->ct);
+ kfree_rcu(e, rcu_head);
}
EXPORT_SYMBOL_GPL(flow_offload_free);
@@ -148,11 +151,9 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
}
EXPORT_SYMBOL_GPL(flow_offload_add);
-void flow_offload_del(struct nf_flowtable *flow_table,
- struct flow_offload *flow)
+static void flow_offload_del(struct nf_flowtable *flow_table,
+ struct flow_offload *flow)
{
- struct flow_offload_entry *e;
-
rhashtable_remove_fast(&flow_table->rhashtable,
&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
*flow_table->type->params);
@@ -160,10 +161,8 @@ void flow_offload_del(struct nf_flowtable *flow_table,
&flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
*flow_table->type->params);
- e = container_of(flow, struct flow_offload_entry, flow);
- kfree_rcu(e, rcu_head);
+ flow_offload_free(flow);
}
-EXPORT_SYMBOL_GPL(flow_offload_del);
struct flow_offload_tuple_rhash *
flow_offload_lookup(struct nf_flowtable *flow_table,
@@ -174,15 +173,6 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
-static void nf_flow_release_ct(const struct flow_offload *flow)
-{
- struct flow_offload_entry *e;
-
- e = container_of(flow, struct flow_offload_entry, flow);
- nf_ct_delete(e->ct, 0, 0);
- nf_ct_put(e->ct);
-}
-
int nf_flow_table_iterate(struct nf_flowtable *flow_table,
void (*iter)(struct flow_offload *flow, void *data),
void *data)
@@ -231,19 +221,16 @@ static inline bool nf_flow_is_dying(const struct flow_offload *flow)
return flow->flags & FLOW_OFFLOAD_DYING;
}
-void nf_flow_offload_work_gc(struct work_struct *work)
+static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
{
struct flow_offload_tuple_rhash *tuplehash;
- struct nf_flowtable *flow_table;
struct rhashtable_iter hti;
struct flow_offload *flow;
int err;
- flow_table = container_of(work, struct nf_flowtable, gc_work.work);
-
err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL);
if (err)
- goto schedule;
+ return 0;
rhashtable_walk_start(&hti);
@@ -261,15 +248,22 @@ void nf_flow_offload_work_gc(struct work_struct *work)
flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
if (nf_flow_has_expired(flow) ||
- nf_flow_is_dying(flow)) {
+ nf_flow_is_dying(flow))
flow_offload_del(flow_table, flow);
- nf_flow_release_ct(flow);
- }
}
out:
rhashtable_walk_stop(&hti);
rhashtable_walk_exit(&hti);
-schedule:
+
+ return 1;
+}
+
+void nf_flow_offload_work_gc(struct work_struct *work)
+{
+ struct nf_flowtable *flow_table;
+
+ flow_table = container_of(work, struct nf_flowtable, gc_work.work);
+ nf_flow_offload_gc_step(flow_table);
queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
EXPORT_SYMBOL_GPL(nf_flow_offload_work_gc);
@@ -425,5 +419,35 @@ int nf_flow_dnat_port(const struct flow_offload *flow,
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
+static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
+{
+ struct net_device *dev = data;
+
+ if (dev && flow->tuplehash[0].tuple.iifidx != dev->ifindex)
+ return;
+
+ flow_offload_dead(flow);
+}
+
+static void nf_flow_table_iterate_cleanup(struct nf_flowtable *flowtable,
+ void *data)
+{
+ nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, data);
+ flush_delayed_work(&flowtable->gc_work);
+}
+
+void nf_flow_table_cleanup(struct net *net, struct net_device *dev)
+{
+ nft_flow_table_iterate(net, nf_flow_table_iterate_cleanup, dev);
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
+
+void nf_flow_table_free(struct nf_flowtable *flow_table)
+{
+ nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
+ WARN_ON(!nf_flow_offload_gc_step(flow_table));
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_free);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index 281209aeba8f..375a1881d93d 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -24,6 +24,7 @@ static struct nf_flowtable_type flowtable_inet = {
.family = NFPROTO_INET,
.params = &nf_flow_offload_rhash_params,
.gc = nf_flow_offload_work_gc,
+ .free = nf_flow_table_free,
.hook = nf_flow_offload_inet_hook,
.owner = THIS_MODULE,
};
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 0791813a1e7d..8b9fe30de0cd 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5006,13 +5006,13 @@ void nft_flow_table_iterate(struct net *net,
struct nft_flowtable *flowtable;
const struct nft_table *table;
- rcu_read_lock();
- list_for_each_entry_rcu(table, &net->nft.tables, list) {
- list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ list_for_each_entry(table, &net->nft.tables, list) {
+ list_for_each_entry(flowtable, &table->flowtables, list) {
iter(&flowtable->data, data);
}
}
- rcu_read_unlock();
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
}
EXPORT_SYMBOL_GPL(nft_flow_table_iterate);
@@ -5399,17 +5399,12 @@ err:
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
-static void nft_flowtable_destroy(void *ptr, void *arg)
-{
- kfree(ptr);
-}
-
static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
{
cancel_delayed_work_sync(&flowtable->data.gc_work);
kfree(flowtable->name);
- rhashtable_free_and_destroy(&flowtable->data.rhashtable,
- nft_flowtable_destroy, NULL);
+ flowtable->data.type->free(&flowtable->data);
+ rhashtable_destroy(&flowtable->data.rhashtable);
module_put(flowtable->data.type->owner);
}
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 4503b8dcf9c0..b65829b2be22 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -194,22 +194,6 @@ static struct nft_expr_type nft_flow_offload_type __read_mostly = {
.owner = THIS_MODULE,
};
-static void flow_offload_iterate_cleanup(struct flow_offload *flow, void *data)
-{
- struct net_device *dev = data;
-
- if (dev && flow->tuplehash[0].tuple.iifidx != dev->ifindex)
- return;
-
- flow_offload_dead(flow);
-}
-
-static void nft_flow_offload_iterate_cleanup(struct nf_flowtable *flowtable,
- void *data)
-{
- nf_flow_table_iterate(flowtable, flow_offload_iterate_cleanup, data);
-}
-
static int flow_offload_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -218,7 +202,7 @@ static int flow_offload_netdev_event(struct notifier_block *this,
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
- nft_flow_table_iterate(dev_net(dev), nft_flow_offload_iterate_cleanup, dev);
+ nf_flow_table_cleanup(dev_net(dev), dev);
return NOTIFY_DONE;
}
@@ -246,14 +230,8 @@ register_expr:
static void __exit nft_flow_offload_module_exit(void)
{
- struct net *net;
-
nft_unregister_expr(&nft_flow_offload_type);
unregister_netdevice_notifier(&flow_offload_netdev_notifier);
- rtnl_lock();
- for_each_net(net)
- nft_flow_table_iterate(net, nft_flow_offload_iterate_cleanup, NULL);
- rtnl_unlock();
}
module_init(nft_flow_offload_module_init);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8fa4d37141a7..2f685ee1f9c8 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1008,7 +1008,12 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
return NULL;
- info = kvmalloc(sz, GFP_KERNEL);
+ /* __GFP_NORETRY is not fully supported by kvmalloc, but when sz is
+ * too large it should still make the allocation bail out early
+ * rather than OOM-kill every process before realizing there is
+ * nothing left to reclaim.
+ */
+ info = kvmalloc(sz, GFP_KERNEL | __GFP_NORETRY);
if (!info)
return NULL;
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 498b54fd04d7..141c295191f6 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -39,23 +39,31 @@ static void xt_rateest_hash_insert(struct xt_rateest *est)
hlist_add_head(&est->list, &rateest_hash[h]);
}
-struct xt_rateest *xt_rateest_lookup(const char *name)
+static struct xt_rateest *__xt_rateest_lookup(const char *name)
{
struct xt_rateest *est;
unsigned int h;
h = xt_rateest_hash(name);
- mutex_lock(&xt_rateest_mutex);
hlist_for_each_entry(est, &rateest_hash[h], list) {
if (strcmp(est->name, name) == 0) {
est->refcnt++;
- mutex_unlock(&xt_rateest_mutex);
return est;
}
}
- mutex_unlock(&xt_rateest_mutex);
+
return NULL;
}
+
+struct xt_rateest *xt_rateest_lookup(const char *name)
+{
+ struct xt_rateest *est;
+
+ mutex_lock(&xt_rateest_mutex);
+ est = __xt_rateest_lookup(name);
+ mutex_unlock(&xt_rateest_mutex);
+ return est;
+}
EXPORT_SYMBOL_GPL(xt_rateest_lookup);
void xt_rateest_put(struct xt_rateest *est)
@@ -100,8 +108,10 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
- est = xt_rateest_lookup(info->name);
+ mutex_lock(&xt_rateest_mutex);
+ est = __xt_rateest_lookup(info->name);
if (est) {
+ mutex_unlock(&xt_rateest_mutex);
/*
* If estimator parameters are specified, they must match the
* existing estimator.
@@ -139,11 +149,13 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
info->est = est;
xt_rateest_hash_insert(est);
+ mutex_unlock(&xt_rateest_mutex);
return 0;
err2:
kfree(est);
err1:
+ mutex_unlock(&xt_rateest_mutex);
return ret;
}
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c
index 1db1ce59079f..891f4e7e8ea7 100644
--- a/net/netfilter/xt_cgroup.c
+++ b/net/netfilter/xt_cgroup.c
@@ -52,6 +52,7 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
return -EINVAL;
}
+ info->priv = NULL;
if (info->has_path) {
cgrp = cgroup_get_from_path(info->path);
if (IS_ERR(cgrp)) {
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index d444daf1ac04..6f02499ef007 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -1081,6 +1081,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
{
struct sk_buff *tmp;
struct net *net, *prev = NULL;
+ bool delivered = false;
int err;
for_each_net_rcu(net) {
@@ -1092,14 +1093,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
}
err = nlmsg_multicast(prev->genl_sock, tmp,
portid, group, flags);
- if (err)
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
goto error;
}
prev = net;
}
- return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+ err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
+ goto error;
+ return delivered ? 0 : -ESRCH;
error:
kfree_skb(skb);
return err;
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 8d19fd25dce3..63da9d2f142d 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -223,7 +223,7 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
rcu_read_lock();
if (!test_and_set_bit(0, &conn->c_map_queued) &&
- !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+ !rds_destroy_pending(cp->cp_conn)) {
rds_stats_inc(s_cong_update_queued);
/* We cannot inline the call to rds_send_xmit() here
* for two reasons (both pertaining to a TCP transport):
diff --git a/net/rds/connection.c b/net/rds/connection.c
index b10c0ef36d8d..94e190febfdd 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -220,8 +220,13 @@ static struct rds_connection *__rds_conn_create(struct net *net,
is_outgoing);
conn->c_path[i].cp_index = i;
}
- ret = trans->conn_alloc(conn, gfp);
+ rcu_read_lock();
+ if (rds_destroy_pending(conn))
+ ret = -ENETDOWN;
+ else
+ ret = trans->conn_alloc(conn, gfp);
if (ret) {
+ rcu_read_unlock();
kfree(conn->c_path);
kmem_cache_free(rds_conn_slab, conn);
conn = ERR_PTR(ret);
@@ -283,6 +288,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
}
}
spin_unlock_irqrestore(&rds_conn_lock, flags);
+ rcu_read_unlock();
out:
return conn;
@@ -382,13 +388,10 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp)
{
struct rds_message *rm, *rtmp;
- set_bit(RDS_DESTROY_PENDING, &cp->cp_flags);
-
if (!cp->cp_transport_data)
return;
/* make sure lingering queued work won't try to ref the conn */
- synchronize_rcu();
cancel_delayed_work_sync(&cp->cp_send_w);
cancel_delayed_work_sync(&cp->cp_recv_w);
@@ -691,7 +694,7 @@ void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy)
atomic_set(&cp->cp_state, RDS_CONN_ERROR);
rcu_read_lock();
- if (!destroy && test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+ if (!destroy && rds_destroy_pending(cp->cp_conn)) {
rcu_read_unlock();
return;
}
@@ -714,7 +717,7 @@ EXPORT_SYMBOL_GPL(rds_conn_drop);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
{
rcu_read_lock();
- if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+ if (rds_destroy_pending(cp->cp_conn)) {
rcu_read_unlock();
return;
}
diff --git a/net/rds/ib.c b/net/rds/ib.c
index ff0c98096af1..50a88f3e7e39 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -48,6 +48,7 @@
static unsigned int rds_ib_mr_1m_pool_size = RDS_MR_1M_POOL_SIZE;
static unsigned int rds_ib_mr_8k_pool_size = RDS_MR_8K_POOL_SIZE;
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
+static atomic_t rds_ib_unloading;
module_param(rds_ib_mr_1m_pool_size, int, 0444);
MODULE_PARM_DESC(rds_ib_mr_1m_pool_size, " Max number of 1M mr per HCA");
@@ -378,8 +379,23 @@ static void rds_ib_unregister_client(void)
flush_workqueue(rds_wq);
}
+static void rds_ib_set_unloading(void)
+{
+ atomic_set(&rds_ib_unloading, 1);
+}
+
+static bool rds_ib_is_unloading(struct rds_connection *conn)
+{
+ struct rds_conn_path *cp = &conn->c_path[0];
+
+ return (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags) ||
+ atomic_read(&rds_ib_unloading) != 0);
+}
+
void rds_ib_exit(void)
{
+ rds_ib_set_unloading();
+ synchronize_rcu();
rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
rds_ib_unregister_client();
rds_ib_destroy_nodev_conns();
@@ -413,6 +429,7 @@ struct rds_transport rds_ib_transport = {
.flush_mrs = rds_ib_flush_mrs,
.t_owner = THIS_MODULE,
.t_name = "infiniband",
+ .t_unloading = rds_ib_is_unloading,
.t_type = RDS_TRANS_IB
};
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 80fb6f63e768..eea1d8611b20 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -117,6 +117,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
&conn->c_laddr, &conn->c_faddr,
RDS_PROTOCOL_MAJOR(conn->c_version),
RDS_PROTOCOL_MINOR(conn->c_version));
+ set_bit(RDS_DESTROY_PENDING, &conn->c_path[0].cp_flags);
rds_conn_destroy(conn);
return;
} else {
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 374ae83b60d4..7301b9b01890 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -518,6 +518,7 @@ struct rds_transport {
void (*sync_mr)(void *trans_private, int direction);
void (*free_mr)(void *trans_private, int invalidate);
void (*flush_mrs)(void);
+ bool (*t_unloading)(struct rds_connection *conn);
};
struct rds_sock {
@@ -862,6 +863,12 @@ static inline void rds_mr_put(struct rds_mr *mr)
__rds_put_mr_final(mr);
}
+static inline bool rds_destroy_pending(struct rds_connection *conn)
+{
+ return !check_net(rds_conn_net(conn)) ||
+ (conn->c_trans->t_unloading && conn->c_trans->t_unloading(conn));
+}
+
/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do { \
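rds_destroy_pending() folds two "stop queueing new work" conditions into one predicate: the owning netns is exiting (!check_net()) and, via the new ->t_unloading hook, the transport module is on its way out. Readers test it under rcu_read_lock(); the writer publishes its flag and then calls synchronize_rcu() so that, by the time teardown continues, no reader can still be acting on a stale value. A sketch of both sides, using names from the hunks above:

/* Reader: only schedule new work while teardown is not pending. */
rcu_read_lock();
if (!rds_destroy_pending(cp->cp_conn))
	queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
rcu_read_unlock();

/* Writer (module exit): publish the flag, then wait out every
 * reader that might have sampled the old value.
 */
atomic_set(&rds_ib_unloading, 1);
synchronize_rcu();
/* safe now: no rds_destroy_pending() reader can requeue work */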
diff --git a/net/rds/send.c b/net/rds/send.c
index d3e32d1f3c7d..b1b0022b8370 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -162,7 +162,7 @@ restart:
goto out;
}
- if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+ if (rds_destroy_pending(cp->cp_conn)) {
release_in_xmit(cp);
ret = -ENETUNREACH; /* don't requeue send work */
goto out;
@@ -444,7 +444,7 @@ over_batch:
if (batch_count < send_batch_count)
goto restart;
rcu_read_lock();
- if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ if (rds_destroy_pending(cp->cp_conn))
ret = -ENETUNREACH;
else
queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
@@ -1162,7 +1162,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
else
cpath = &conn->c_path[0];
- if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags)) {
+ if (rds_destroy_pending(conn)) {
ret = -EAGAIN;
goto out;
}
@@ -1209,7 +1209,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
if (ret == -ENOMEM || ret == -EAGAIN) {
ret = 0;
rcu_read_lock();
- if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags))
+ if (rds_destroy_pending(cpath->cp_conn))
ret = -ENETUNREACH;
else
queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
@@ -1295,7 +1295,7 @@ rds_send_probe(struct rds_conn_path *cp, __be16 sport,
/* schedule the send work on rds_wq */
rcu_read_lock();
- if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ if (!rds_destroy_pending(cp->cp_conn))
queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
rcu_read_unlock();
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 9920d2f84eff..44c4652721af 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -49,6 +49,7 @@ static unsigned int rds_tcp_tc_count;
/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);
+static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);
static struct kmem_cache *rds_tcp_conn_slab;
@@ -274,14 +275,13 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
static void rds_tcp_conn_free(void *arg)
{
struct rds_tcp_connection *tc = arg;
- unsigned long flags;
rdsdebug("freeing tc %p\n", tc);
- spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+ spin_lock_bh(&rds_tcp_conn_lock);
if (!tc->t_tcp_node_detached)
list_del(&tc->t_tcp_node);
- spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+ spin_unlock_bh(&rds_tcp_conn_lock);
kmem_cache_free(rds_tcp_conn_slab, tc);
}
@@ -296,7 +296,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
if (!tc) {
ret = -ENOMEM;
- break;
+ goto fail;
}
mutex_init(&tc->t_conn_path_lock);
tc->t_sock = NULL;
@@ -306,14 +306,19 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
conn->c_path[i].cp_transport_data = tc;
tc->t_cpath = &conn->c_path[i];
+ tc->t_tcp_node_detached = true;
- spin_lock_irq(&rds_tcp_conn_lock);
- tc->t_tcp_node_detached = false;
- list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
- spin_unlock_irq(&rds_tcp_conn_lock);
rdsdebug("rds_conn_path [%d] tc %p\n", i,
conn->c_path[i].cp_transport_data);
}
+ spin_lock_bh(&rds_tcp_conn_lock);
+ for (i = 0; i < RDS_MPATH_WORKERS; i++) {
+ tc = conn->c_path[i].cp_transport_data;
+ tc->t_tcp_node_detached = false;
+ list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
+ }
+ spin_unlock_bh(&rds_tcp_conn_lock);
+fail:
if (ret) {
for (j = 0; j < i; j++)
rds_tcp_conn_free(conn->c_path[j].cp_transport_data);
@@ -332,6 +337,16 @@ static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
return false;
}
+static void rds_tcp_set_unloading(void)
+{
+ atomic_set(&rds_tcp_unloading, 1);
+}
+
+static bool rds_tcp_is_unloading(struct rds_connection *conn)
+{
+ return atomic_read(&rds_tcp_unloading) != 0;
+}
+
static void rds_tcp_destroy_conns(void)
{
struct rds_tcp_connection *tc, *_tc;
@@ -370,6 +385,7 @@ struct rds_transport rds_tcp_transport = {
.t_type = RDS_TRANS_TCP,
.t_prefer_loopback = 1,
.t_mp_capable = 1,
+ .t_unloading = rds_tcp_is_unloading,
};
static unsigned int rds_tcp_netid;
@@ -513,7 +529,7 @@ static void rds_tcp_kill_sock(struct net *net)
rtn->rds_tcp_listen_sock = NULL;
rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
- spin_lock_irq(&rds_tcp_conn_lock);
+ spin_lock_bh(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
@@ -526,7 +542,7 @@ static void rds_tcp_kill_sock(struct net *net)
tc->t_tcp_node_detached = true;
}
}
- spin_unlock_irq(&rds_tcp_conn_lock);
+ spin_unlock_bh(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
rds_conn_destroy(tc->t_cpath->cp_conn);
}
@@ -574,7 +590,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
{
struct rds_tcp_connection *tc, *_tc;
- spin_lock_irq(&rds_tcp_conn_lock);
+ spin_lock_bh(&rds_tcp_conn_lock);
list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
@@ -584,7 +600,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
/* reconnect with new parameters */
rds_conn_path_drop(tc->t_cpath, false);
}
- spin_unlock_irq(&rds_tcp_conn_lock);
+ spin_unlock_bh(&rds_tcp_conn_lock);
}
static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
@@ -607,6 +623,8 @@ static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
static void rds_tcp_exit(void)
{
+ rds_tcp_set_unloading();
+ synchronize_rcu();
rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
unregister_pernet_subsys(&rds_tcp_net_ops);
if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
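Two independent improvements sit in the rds/tcp.c hunks: rds_tcp_conn_list moves from the irq-disabling spinlock variants to spin_lock_bh() (the list is only touched from process and softirq context, so masking hard interrupts bought nothing), and connection paths are linked onto the list only after every path has been allocated, so a list walker can no longer observe a half-constructed connection. A sketch of the publish-after-init ordering (alloc_path() is an illustrative constructor):

/* Allocate all per-path objects first, outside the lock ... */
for (i = 0; i < RDS_MPATH_WORKERS; i++) {
	tc = alloc_path(conn, gfp);		/* hypothetical constructor */
	if (!tc)
		goto fail;
	tc->t_tcp_node_detached = true;		/* not yet visible */
	conn->c_path[i].cp_transport_data = tc;
}

/* ... then publish them all in one short critical section, so a
 * reader sees either no paths or all of them fully initialized.
 */
spin_lock_bh(&rds_tcp_conn_lock);
for (i = 0; i < RDS_MPATH_WORKERS; i++) {
	tc = conn->c_path[i].cp_transport_data;
	tc->t_tcp_node_detached = false;
	list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
}
spin_unlock_bh(&rds_tcp_conn_lock);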
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index 534c67aeb20f..d999e7075645 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -170,7 +170,7 @@ void rds_tcp_conn_path_shutdown(struct rds_conn_path *cp)
cp->cp_conn, tc, sock);
if (sock) {
- if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ if (rds_destroy_pending(cp->cp_conn))
rds_tcp_set_linger(sock);
sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN);
lock_sock(sock->sk);
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index dd707b9e73e5..b9fbd2ee74ef 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -323,7 +323,7 @@ void rds_tcp_data_ready(struct sock *sk)
if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
rcu_read_lock();
- if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ if (!rds_destroy_pending(cp->cp_conn))
queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
rcu_read_unlock();
}
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 16f65744d984..7df869d37afd 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -204,7 +204,7 @@ void rds_tcp_write_space(struct sock *sk)
rcu_read_lock();
if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf &&
- !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ !rds_destroy_pending(cp->cp_conn))
queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
rcu_read_unlock();
diff --git a/net/rds/threads.c b/net/rds/threads.c
index eb76db1360b0..c52861d77a59 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -88,7 +88,7 @@ void rds_connect_path_complete(struct rds_conn_path *cp, int curr)
cp->cp_reconnect_jiffies = 0;
set_bit(0, &cp->cp_conn->c_map_queued);
rcu_read_lock();
- if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) {
+ if (!rds_destroy_pending(cp->cp_conn)) {
queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
}
@@ -138,7 +138,7 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
if (cp->cp_reconnect_jiffies == 0) {
cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
rcu_read_lock();
- if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ if (!rds_destroy_pending(cp->cp_conn))
queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
rcu_read_unlock();
return;
@@ -149,7 +149,7 @@ void rds_queue_reconnect(struct rds_conn_path *cp)
rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies,
conn, &conn->c_laddr, &conn->c_faddr);
rcu_read_lock();
- if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
+ if (!rds_destroy_pending(cp->cp_conn))
queue_delayed_work(rds_wq, &cp->cp_conn_w,
rand % cp->cp_reconnect_jiffies);
rcu_read_unlock();
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 7f74ca3059f8..064175068059 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -834,7 +834,8 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
* can be skipped if we find a follow-on call. The first DATA packet
* of the follow on call will implicitly ACK this call.
*/
- if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
+ if (call->completion == RXRPC_CALL_SUCCEEDED &&
+ test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
unsigned long final_ack_at = jiffies + 2;
WRITE_ONCE(chan->final_ack_at, final_ack_at);
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 4ca11be6be3c..b1dfae107431 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -460,6 +460,7 @@ void rxrpc_process_connection(struct work_struct *work)
case -EKEYEXPIRED:
case -EKEYREJECTED:
goto protocol_error;
+ case -ENOMEM:
case -EAGAIN:
goto requeue_and_leave;
case -ECONNABORTED:
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index c628351eb900..ccbac190add1 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -177,13 +177,21 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
* through the channel, whilst disposing of the actual call record.
*/
trace_rxrpc_disconnect_call(call);
- if (call->abort_code) {
- chan->last_abort = call->abort_code;
- chan->last_type = RXRPC_PACKET_TYPE_ABORT;
- } else {
+ switch (call->completion) {
+ case RXRPC_CALL_SUCCEEDED:
chan->last_seq = call->rx_hard_ack;
chan->last_type = RXRPC_PACKET_TYPE_ACK;
+ break;
+ case RXRPC_CALL_LOCALLY_ABORTED:
+ chan->last_abort = call->abort_code;
+ chan->last_type = RXRPC_PACKET_TYPE_ABORT;
+ break;
+ default:
+ chan->last_abort = RX_USER_ABORT;
+ chan->last_type = RXRPC_PACKET_TYPE_ABORT;
+ break;
}
+
/* Sync with rxrpc_conn_retransmit(). */
smp_wmb();
chan->last_call = chan->call_id;
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index c38b3a1de56c..77cb23c7bd0a 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -773,8 +773,7 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
{
const struct rxrpc_key_token *token;
struct rxkad_challenge challenge;
- struct rxkad_response resp
- __attribute__((aligned(8))); /* must be aligned for crypto */
+ struct rxkad_response *resp;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
const char *eproto;
u32 version, nonce, min_level, abort_code;
@@ -818,26 +817,29 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
token = conn->params.key->payload.data[0];
/* build the response packet */
- memset(&resp, 0, sizeof(resp));
-
- resp.version = htonl(RXKAD_VERSION);
- resp.encrypted.epoch = htonl(conn->proto.epoch);
- resp.encrypted.cid = htonl(conn->proto.cid);
- resp.encrypted.securityIndex = htonl(conn->security_ix);
- resp.encrypted.inc_nonce = htonl(nonce + 1);
- resp.encrypted.level = htonl(conn->params.security_level);
- resp.kvno = htonl(token->kad->kvno);
- resp.ticket_len = htonl(token->kad->ticket_len);
-
- resp.encrypted.call_id[0] = htonl(conn->channels[0].call_counter);
- resp.encrypted.call_id[1] = htonl(conn->channels[1].call_counter);
- resp.encrypted.call_id[2] = htonl(conn->channels[2].call_counter);
- resp.encrypted.call_id[3] = htonl(conn->channels[3].call_counter);
+ resp = kzalloc(sizeof(struct rxkad_response), GFP_NOFS);
+ if (!resp)
+ return -ENOMEM;
+
+ resp->version = htonl(RXKAD_VERSION);
+ resp->encrypted.epoch = htonl(conn->proto.epoch);
+ resp->encrypted.cid = htonl(conn->proto.cid);
+ resp->encrypted.securityIndex = htonl(conn->security_ix);
+ resp->encrypted.inc_nonce = htonl(nonce + 1);
+ resp->encrypted.level = htonl(conn->params.security_level);
+ resp->kvno = htonl(token->kad->kvno);
+ resp->ticket_len = htonl(token->kad->ticket_len);
+ resp->encrypted.call_id[0] = htonl(conn->channels[0].call_counter);
+ resp->encrypted.call_id[1] = htonl(conn->channels[1].call_counter);
+ resp->encrypted.call_id[2] = htonl(conn->channels[2].call_counter);
+ resp->encrypted.call_id[3] = htonl(conn->channels[3].call_counter);
/* calculate the response checksum and then do the encryption */
- rxkad_calc_response_checksum(&resp);
- rxkad_encrypt_response(conn, &resp, token->kad);
- return rxkad_send_response(conn, &sp->hdr, &resp, token->kad);
+ rxkad_calc_response_checksum(resp);
+ rxkad_encrypt_response(conn, resp, token->kad);
+ ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad);
+ kfree(resp);
+ return ret;
protocol_error:
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
@@ -1048,8 +1050,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
struct sk_buff *skb,
u32 *_abort_code)
{
- struct rxkad_response response
- __attribute__((aligned(8))); /* must be aligned for crypto */
+ struct rxkad_response *response;
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
struct rxrpc_crypt session_key;
const char *eproto;
@@ -1061,17 +1062,22 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
_enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
+ ret = -ENOMEM;
+ response = kzalloc(sizeof(struct rxkad_response), GFP_NOFS);
+ if (!response)
+ goto temporary_error;
+
eproto = tracepoint_string("rxkad_rsp_short");
abort_code = RXKADPACKETSHORT;
if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
- &response, sizeof(response)) < 0)
+ response, sizeof(*response)) < 0)
goto protocol_error;
- if (!pskb_pull(skb, sizeof(response)))
+ if (!pskb_pull(skb, sizeof(*response)))
BUG();
- version = ntohl(response.version);
- ticket_len = ntohl(response.ticket_len);
- kvno = ntohl(response.kvno);
+ version = ntohl(response->version);
+ ticket_len = ntohl(response->ticket_len);
+ kvno = ntohl(response->kvno);
_proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }",
sp->hdr.serial, version, kvno, ticket_len);
@@ -1105,31 +1111,31 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key,
&expiry, _abort_code);
if (ret < 0)
- goto temporary_error_free;
+ goto temporary_error_free_resp;
/* use the session key from inside the ticket to decrypt the
* response */
- rxkad_decrypt_response(conn, &response, &session_key);
+ rxkad_decrypt_response(conn, response, &session_key);
eproto = tracepoint_string("rxkad_rsp_param");
abort_code = RXKADSEALEDINCON;
- if (ntohl(response.encrypted.epoch) != conn->proto.epoch)
+ if (ntohl(response->encrypted.epoch) != conn->proto.epoch)
goto protocol_error_free;
- if (ntohl(response.encrypted.cid) != conn->proto.cid)
+ if (ntohl(response->encrypted.cid) != conn->proto.cid)
goto protocol_error_free;
- if (ntohl(response.encrypted.securityIndex) != conn->security_ix)
+ if (ntohl(response->encrypted.securityIndex) != conn->security_ix)
goto protocol_error_free;
- csum = response.encrypted.checksum;
- response.encrypted.checksum = 0;
- rxkad_calc_response_checksum(&response);
+ csum = response->encrypted.checksum;
+ response->encrypted.checksum = 0;
+ rxkad_calc_response_checksum(response);
eproto = tracepoint_string("rxkad_rsp_csum");
- if (response.encrypted.checksum != csum)
+ if (response->encrypted.checksum != csum)
goto protocol_error_free;
spin_lock(&conn->channel_lock);
for (i = 0; i < RXRPC_MAXCALLS; i++) {
struct rxrpc_call *call;
- u32 call_id = ntohl(response.encrypted.call_id[i]);
+ u32 call_id = ntohl(response->encrypted.call_id[i]);
eproto = tracepoint_string("rxkad_rsp_callid");
if (call_id > INT_MAX)
@@ -1153,12 +1159,12 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
eproto = tracepoint_string("rxkad_rsp_seq");
abort_code = RXKADOUTOFSEQUENCE;
- if (ntohl(response.encrypted.inc_nonce) != conn->security_nonce + 1)
+ if (ntohl(response->encrypted.inc_nonce) != conn->security_nonce + 1)
goto protocol_error_free;
eproto = tracepoint_string("rxkad_rsp_level");
abort_code = RXKADLEVELFAIL;
- level = ntohl(response.encrypted.level);
+ level = ntohl(response->encrypted.level);
if (level > RXRPC_SECURITY_ENCRYPT)
goto protocol_error_free;
conn->params.security_level = level;
@@ -1168,9 +1174,10 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
* as for a client connection */
ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno);
if (ret < 0)
- goto temporary_error_free;
+ goto temporary_error_free_ticket;
kfree(ticket);
+ kfree(response);
_leave(" = 0");
return 0;
@@ -1179,12 +1186,15 @@ protocol_error_unlock:
protocol_error_free:
kfree(ticket);
protocol_error:
+ kfree(response);
trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
*_abort_code = abort_code;
return -EPROTO;
-temporary_error_free:
+temporary_error_free_ticket:
kfree(ticket);
+temporary_error_free_resp:
+ kfree(response);
temporary_error:
/* Ignore the response packet if we got a temporary error such as
* ENOMEM. We just want to send the challenge again. Note that we
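Both rxkad hunks replace a struct rxkad_response that lived on the kernel stack (with an explicit aligned(8) attribute for the crypto layer) by a kzalloc() buffer. Beyond trimming a large stack frame, kmalloc-family memory is aligned to ARCH_KMALLOC_MINALIGN, which in practice is at least __alignof__(unsigned long long), so the 8-byte crypto requirement still holds without the attribute. The resulting shape, with the fill and encrypt steps elided:

struct rxkad_response *resp;
int ret;

resp = kzalloc(sizeof(*resp), GFP_NOFS);	/* heap: large and aligned */
if (!resp)
	return -ENOMEM;

/* ... fill resp, checksum and encrypt it ... */

ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad);
kfree(resp);					/* freed on every exit path */
return ret;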
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 52622a3d2517..eba6682727dd 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -78,7 +78,7 @@ static void free_tcf(struct tc_action *p)
static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
{
spin_lock_bh(&idrinfo->lock);
- idr_remove_ext(&idrinfo->action_idr, p->tcfa_index);
+ idr_remove(&idrinfo->action_idr, p->tcfa_index);
spin_unlock_bh(&idrinfo->lock);
gen_kill_estimator(&p->tcfa_rate_est);
free_tcf(p);
@@ -124,7 +124,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
s_i = cb->args[0];
- idr_for_each_entry_ext(idr, p, id) {
+ idr_for_each_entry_ul(idr, p, id) {
index++;
if (index < s_i)
continue;
@@ -181,7 +181,7 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
if (nla_put_string(skb, TCA_KIND, ops->kind))
goto nla_put_failure;
- idr_for_each_entry_ext(idr, p, id) {
+ idr_for_each_entry_ul(idr, p, id) {
ret = __tcf_idr_release(p, false, true);
if (ret == ACT_P_DELETED) {
module_put(ops->owner);
@@ -222,7 +222,7 @@ static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
struct tc_action *p = NULL;
spin_lock_bh(&idrinfo->lock);
- p = idr_find_ext(&idrinfo->action_idr, index);
+ p = idr_find(&idrinfo->action_idr, index);
spin_unlock_bh(&idrinfo->lock);
return p;
@@ -274,7 +274,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tcf_idrinfo *idrinfo = tn->idrinfo;
struct idr *idr = &idrinfo->action_idr;
int err = -ENOMEM;
- unsigned long idr_index;
if (unlikely(!p))
return -ENOMEM;
@@ -284,45 +283,28 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
if (cpustats) {
p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
- if (!p->cpu_bstats) {
-err1:
- kfree(p);
- return err;
- }
- p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
- if (!p->cpu_qstats) {
-err2:
- free_percpu(p->cpu_bstats);
+ if (!p->cpu_bstats)
goto err1;
- }
+ p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+ if (!p->cpu_qstats)
+ goto err2;
}
spin_lock_init(&p->tcfa_lock);
+ idr_preload(GFP_KERNEL);
+ spin_lock_bh(&idrinfo->lock);
/* user doesn't specify an index */
if (!index) {
- idr_preload(GFP_KERNEL);
- spin_lock_bh(&idrinfo->lock);
- err = idr_alloc_ext(idr, NULL, &idr_index, 1, 0,
- GFP_ATOMIC);
- spin_unlock_bh(&idrinfo->lock);
- idr_preload_end();
- if (err) {
-err3:
- free_percpu(p->cpu_qstats);
- goto err2;
- }
- p->tcfa_index = idr_index;
+ index = 1;
+ err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC);
} else {
- idr_preload(GFP_KERNEL);
- spin_lock_bh(&idrinfo->lock);
- err = idr_alloc_ext(idr, NULL, NULL, index, index + 1,
- GFP_ATOMIC);
- spin_unlock_bh(&idrinfo->lock);
- idr_preload_end();
- if (err)
- goto err3;
- p->tcfa_index = index;
+ err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
}
+ spin_unlock_bh(&idrinfo->lock);
+ idr_preload_end();
+ if (err)
+ goto err3;
+ p->tcfa_index = index;
p->tcfa_tm.install = jiffies;
p->tcfa_tm.lastuse = jiffies;
p->tcfa_tm.firstuse = 0;
@@ -330,9 +312,8 @@ err3:
err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
&p->tcfa_rate_est,
&p->tcfa_lock, NULL, est);
- if (err) {
- goto err3;
- }
+ if (err)
+ goto err4;
}
p->idrinfo = idrinfo;
@@ -340,6 +321,15 @@ err3:
INIT_LIST_HEAD(&p->list);
*a = p;
return 0;
+err4:
+ idr_remove(idr, index);
+err3:
+ free_percpu(p->cpu_qstats);
+err2:
+ free_percpu(p->cpu_bstats);
+err1:
+ kfree(p);
+ return err;
}
EXPORT_SYMBOL(tcf_idr_create);
@@ -348,7 +338,7 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
struct tcf_idrinfo *idrinfo = tn->idrinfo;
spin_lock_bh(&idrinfo->lock);
- idr_replace_ext(&idrinfo->action_idr, a, a->tcfa_index);
+ idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
spin_unlock_bh(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);
@@ -361,7 +351,7 @@ void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
int ret;
unsigned long id = 1;
- idr_for_each_entry_ext(idr, p, id) {
+ idr_for_each_entry_ul(idr, p, id) {
ret = __tcf_idr_release(p, false, true);
if (ret == ACT_P_DELETED)
module_put(ops->owner);
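This and the following classifier hunks convert the old _ext IDR calls to idr_alloc_u32(), whose contract differs in two ways: the ID is passed by reference (minimum acceptable value on entry, allocated value on successful return) and the upper bound is an inclusive maximum rather than a half-open end. A hedged usage sketch with illustrative names:

#include <linux/idr.h>

static DEFINE_IDR(example_idr);

/* Dynamic ID: *id is the minimum on entry and holds the allocated
 * value on success; UINT_MAX is an inclusive cap.
 */
static int example_alloc_any(void *ptr, u32 *id)
{
	*id = 1;			/* never hand out ID 0 */
	return idr_alloc_u32(&example_idr, ptr, id, UINT_MAX, GFP_KERNEL);
}

/* User-requested ID: pin it by making min == max == the request. */
static int example_alloc_exact(void *ptr, u32 id)
{
	return idr_alloc_u32(&example_idr, ptr, &id, id, GFP_KERNEL);
}

This is why call sites of the form idr_alloc_ext(idr, NULL, NULL, handle, handle + 1, ...) become idr_alloc_u32(idr, NULL, &handle, handle, ...) throughout these files.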
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index bcb4ccb5f894..2bc1bc23d42e 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -381,8 +381,8 @@ static int tcf_block_insert(struct tcf_block *block, struct net *net,
struct tcf_net *tn = net_generic(net, tcf_net_id);
int err;
- err = idr_alloc_ext(&tn->idr, block, NULL, block_index,
- block_index + 1, GFP_KERNEL);
+ err = idr_alloc_u32(&tn->idr, block, &block_index, block_index,
+ GFP_KERNEL);
if (err)
return err;
block->index = block_index;
@@ -393,7 +393,7 @@ static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
struct tcf_net *tn = net_generic(net, tcf_net_id);
- idr_remove_ext(&tn->idr, block->index);
+ idr_remove(&tn->idr, block->index);
}
static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
@@ -434,7 +434,7 @@ static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
struct tcf_net *tn = net_generic(net, tcf_net_id);
- return idr_find_ext(&tn->idr, block_index);
+ return idr_find(&tn->idr, block_index);
}
static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block)
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index d333f5c5101d..6b7ab3512f5b 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -120,7 +120,7 @@ static void basic_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
list_for_each_entry_safe(f, n, &head->flist, link) {
list_del_rcu(&f->link);
tcf_unbind_filter(tp, &f->res);
- idr_remove_ext(&head->handle_idr, f->handle);
+ idr_remove(&head->handle_idr, f->handle);
if (tcf_exts_get_net(&f->exts))
call_rcu(&f->rcu, basic_delete_filter);
else
@@ -138,7 +138,7 @@ static int basic_delete(struct tcf_proto *tp, void *arg, bool *last,
list_del_rcu(&f->link);
tcf_unbind_filter(tp, &f->res);
- idr_remove_ext(&head->handle_idr, f->handle);
+ idr_remove(&head->handle_idr, f->handle);
tcf_exts_get_net(&f->exts);
call_rcu(&f->rcu, basic_delete_filter);
*last = list_empty(&head->flist);
@@ -185,7 +185,6 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
struct nlattr *tb[TCA_BASIC_MAX + 1];
struct basic_filter *fold = (struct basic_filter *) *arg;
struct basic_filter *fnew;
- unsigned long idr_index;
if (tca[TCA_OPTIONS] == NULL)
return -EINVAL;
@@ -208,34 +207,30 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
goto errout;
- if (handle) {
- fnew->handle = handle;
- if (!fold) {
- err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
- handle, handle + 1, GFP_KERNEL);
- if (err)
- goto errout;
- }
- } else {
- err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
- 1, 0x7FFFFFFF, GFP_KERNEL);
- if (err)
- goto errout;
- fnew->handle = idr_index;
+ if (!handle) {
+ handle = 1;
+ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+ INT_MAX, GFP_KERNEL);
+ } else if (!fold) {
+ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+ handle, GFP_KERNEL);
}
+ if (err)
+ goto errout;
+ fnew->handle = handle;
err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr,
extack);
if (err < 0) {
if (!fold)
- idr_remove_ext(&head->handle_idr, fnew->handle);
+ idr_remove(&head->handle_idr, fnew->handle);
goto errout;
}
*arg = fnew;
if (fold) {
- idr_replace_ext(&head->handle_idr, fnew, fnew->handle);
+ idr_replace(&head->handle_idr, fnew, fnew->handle);
list_replace_rcu(&fold->link, &fnew->link);
tcf_unbind_filter(tp, &fold->res);
tcf_exts_get_net(&fold->exts);
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 8e5326bc6440..b07c1fa8bc0d 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -295,7 +295,7 @@ static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
{
struct cls_bpf_head *head = rtnl_dereference(tp->root);
- idr_remove_ext(&head->handle_idr, prog->handle);
+ idr_remove(&head->handle_idr, prog->handle);
cls_bpf_stop_offload(tp, prog, extack);
list_del_rcu(&prog->link);
tcf_unbind_filter(tp, &prog->res);
@@ -471,7 +471,6 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
struct cls_bpf_prog *oldprog = *arg;
struct nlattr *tb[TCA_BPF_MAX + 1];
struct cls_bpf_prog *prog;
- unsigned long idr_index;
int ret;
if (tca[TCA_OPTIONS] == NULL)
@@ -498,21 +497,18 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
}
if (handle == 0) {
- ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
- 1, 0x7FFFFFFF, GFP_KERNEL);
- if (ret)
- goto errout;
- prog->handle = idr_index;
- } else {
- if (!oldprog) {
- ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
- handle, handle + 1, GFP_KERNEL);
- if (ret)
- goto errout;
- }
- prog->handle = handle;
+ handle = 1;
+ ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
+ INT_MAX, GFP_KERNEL);
+ } else if (!oldprog) {
+ ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
+ handle, GFP_KERNEL);
}
+ if (ret)
+ goto errout;
+ prog->handle = handle;
+
ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
extack);
if (ret < 0)
@@ -526,7 +522,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
if (oldprog) {
- idr_replace_ext(&head->handle_idr, prog, handle);
+ idr_replace(&head->handle_idr, prog, handle);
list_replace_rcu(&oldprog->link, &prog->link);
tcf_unbind_filter(tp, &oldprog->res);
tcf_exts_get_net(&oldprog->exts);
@@ -542,7 +538,7 @@ errout_parms:
cls_bpf_free_parms(prog);
errout_idr:
if (!oldprog)
- idr_remove_ext(&head->handle_idr, prog->handle);
+ idr_remove(&head->handle_idr, prog->handle);
errout:
tcf_exts_destroy(&prog->exts);
kfree(prog);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index dc9acaafc0a8..7d0ce2c40f93 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -288,7 +288,7 @@ static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
- idr_remove_ext(&head->handle_idr, f->handle);
+ idr_remove(&head->handle_idr, f->handle);
list_del_rcu(&f->list);
if (!tc_skip_hw(f->flags))
fl_hw_destroy_filter(tp, f, extack);
@@ -334,7 +334,7 @@ static void *fl_get(struct tcf_proto *tp, u32 handle)
{
struct cls_fl_head *head = rtnl_dereference(tp->root);
- return idr_find_ext(&head->handle_idr, handle);
+ return idr_find(&head->handle_idr, handle);
}
static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
@@ -865,7 +865,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
struct cls_fl_filter *fnew;
struct nlattr **tb;
struct fl_flow_mask mask = {};
- unsigned long idr_index;
int err;
if (!tca[TCA_OPTIONS])
@@ -896,21 +895,17 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
goto errout;
if (!handle) {
- err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
- 1, 0x80000000, GFP_KERNEL);
- if (err)
- goto errout;
- fnew->handle = idr_index;
- }
-
- /* user specifies a handle and it doesn't exist */
- if (handle && !fold) {
- err = idr_alloc_ext(&head->handle_idr, fnew, &idr_index,
- handle, handle + 1, GFP_KERNEL);
- if (err)
- goto errout;
- fnew->handle = idr_index;
+ handle = 1;
+ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+ INT_MAX, GFP_KERNEL);
+ } else if (!fold) {
+ /* user specifies a handle and it doesn't exist */
+ err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+ handle, GFP_KERNEL);
}
+ if (err)
+ goto errout;
+ fnew->handle = handle;
if (tb[TCA_FLOWER_FLAGS]) {
fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
@@ -966,8 +961,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
*arg = fnew;
if (fold) {
- fnew->handle = handle;
- idr_replace_ext(&head->handle_idr, fnew, fnew->handle);
+ idr_replace(&head->handle_idr, fnew, fnew->handle);
list_replace_rcu(&fold->list, &fnew->list);
tcf_unbind_filter(tp, &fold->res);
tcf_exts_get_net(&fold->exts);
@@ -981,7 +975,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
errout_idr:
if (fnew->handle)
- idr_remove_ext(&head->handle_idr, fnew->handle);
+ idr_remove(&head->handle_idr, fnew->handle);
errout:
tcf_exts_destroy(&fnew->exts);
kfree(fnew);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 6311a548046b..6c7601a530e3 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -316,19 +316,13 @@ static void *u32_get(struct tcf_proto *tp, u32 handle)
return u32_lookup_key(ht, handle);
}
+/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
- unsigned long idr_index;
- int err;
-
- /* This is only used inside rtnl lock it is safe to increment
- * without read _copy_ update semantics
- */
- err = idr_alloc_ext(&tp_c->handle_idr, ptr, &idr_index,
- 1, 0x7FF, GFP_KERNEL);
- if (err)
+ int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
+ if (id < 0)
return 0;
- return (u32)(idr_index | 0x800) << 20;
+ return (id | 0x800U) << 20;
}
static struct hlist_head *tc_u_common_hash;
@@ -598,7 +592,7 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
rtnl_dereference(n->next));
tcf_unbind_filter(tp, &n->res);
u32_remove_hw_knode(tp, n, extack);
- idr_remove_ext(&ht->handle_idr, n->handle);
+ idr_remove(&ht->handle_idr, n->handle);
if (tcf_exts_get_net(&n->exts))
call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
else
@@ -625,7 +619,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
if (phn == ht) {
u32_clear_hw_hnode(tp, ht, extack);
idr_destroy(&ht->handle_idr);
- idr_remove_ext(&tp_c->handle_idr, ht->handle);
+ idr_remove(&tp_c->handle_idr, ht->handle);
RCU_INIT_POINTER(*hn, ht->next);
kfree_rcu(ht, rcu);
return 0;
@@ -747,19 +741,17 @@ ret:
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
- unsigned long idr_index;
- u32 start = htid | 0x800;
+ u32 index = htid | 0x800;
u32 max = htid | 0xFFF;
- u32 min = htid;
- if (idr_alloc_ext(&ht->handle_idr, NULL, &idr_index,
- start, max + 1, GFP_KERNEL)) {
- if (idr_alloc_ext(&ht->handle_idr, NULL, &idr_index,
- min + 1, max + 1, GFP_KERNEL))
- return max;
+ if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
+ index = htid + 1;
+ if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
+ GFP_KERNEL))
+ index = max;
}
- return (u32)idr_index;
+ return index;
}
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
@@ -849,7 +841,7 @@ static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
if (pins->handle == n->handle)
break;
- idr_replace_ext(&ht->handle_idr, n, n->handle);
+ idr_replace(&ht->handle_idr, n, n->handle);
RCU_INIT_POINTER(n->next, pins->next);
rcu_assign_pointer(*ins, n);
}
@@ -955,7 +947,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -EINVAL;
}
- if (n->flags != flags) {
+ if ((n->flags ^ flags) &
+ ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
return -EINVAL;
}
@@ -1010,8 +1003,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -ENOMEM;
}
} else {
- err = idr_alloc_ext(&tp_c->handle_idr, ht, NULL,
- handle, handle + 1, GFP_KERNEL);
+ err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
+ handle, GFP_KERNEL);
if (err) {
kfree(ht);
return err;
@@ -1027,7 +1020,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
err = u32_replace_hw_hnode(tp, ht, flags, extack);
if (err) {
- idr_remove_ext(&tp_c->handle_idr, handle);
+ idr_remove(&tp_c->handle_idr, handle);
kfree(ht);
return err;
}
@@ -1067,8 +1060,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -EINVAL;
}
handle = htid | TC_U32_NODE(handle);
- err = idr_alloc_ext(&ht->handle_idr, NULL, NULL,
- handle, handle + 1,
+ err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
GFP_KERNEL);
if (err)
return err;
@@ -1163,7 +1155,7 @@ errfree:
#endif
kfree(n);
erridr:
- idr_remove_ext(&ht->handle_idr, handle);
+ idr_remove(&ht->handle_idr, handle);
return err;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 7bbc13b8ca47..7c179addebcd 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -327,7 +327,7 @@ static s64 tabledist(s64 mu, s32 sigma,
/* default uniform distribution */
if (dist == NULL)
- return (rnd % (2 * sigma)) - sigma + mu;
+ return ((rnd % (2 * sigma)) + mu) - sigma;
t = dist->table[rnd % dist->size];
x = (sigma % NETEM_DIST_SCALE) * t;
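The tabledist() reordering is an integer-promotion fix rather than algebra: rnd comes from get_crandom() and is a u32, so in (rnd % (2 * sigma)) - sigma + mu the subtraction happens in unsigned arithmetic and wraps to a huge value whenever rnd % (2 * sigma) < sigma; adding the s64 mu first promotes the expression to signed 64-bit before sigma is subtracted. A worked fragment showing the difference:

u32 rnd   = 100;	/* example random draw */
s32 sigma = 1000;
s64 mu    = 5000;

/* old: 100u - 1000 wraps to 4294966396, + 5000 => ~4.29e9 */
s64 broken = (rnd % (2 * sigma)) - sigma + mu;

/* new: (100 + 5000LL) - 1000 => 4100, inside [mu - sigma, mu + sigma) */
s64 fixed = ((rnd % (2 * sigma)) + mu) - sigma;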
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 793b05ec692b..d01475f5f710 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1380,9 +1380,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
struct sctp_chunk *retval;
struct sk_buff *skb;
struct sock *sk;
+ int chunklen;
+
+ chunklen = SCTP_PAD4(sizeof(*chunk_hdr) + paylen);
+ if (chunklen > SCTP_MAX_CHUNK_LEN)
+ goto nodata;
/* No need to allocate LL here, as this is only a chunk. */
- skb = alloc_skb(SCTP_PAD4(sizeof(*chunk_hdr) + paylen), gfp);
+ skb = alloc_skb(chunklen, gfp);
if (!skb)
goto nodata;
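The _sctp_make_chunk() guard bounds the padded chunk length before any allocation: the on-wire chunk header carries the length in a 16-bit field, so a paylen large enough to exceed SCTP_MAX_CHUNK_LEN would truncate when stored there, leaving the header inconsistent with the skb (the upstream report for this class of bug was an skb_over_panic on oversized INIT chunks). The check reduces to:

int chunklen = SCTP_PAD4(sizeof(struct sctp_chunkhdr) + paylen);

if (chunklen > SCTP_MAX_CHUNK_LEN)	/* would not fit the 16-bit field */
	goto nodata;			/* reject before alloc_skb() */
skb = alloc_skb(chunklen, gfp);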
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 896691afbb1a..d9db2eab3a8d 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -461,6 +461,18 @@ static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct r
/*
* Wake up a task on a specific queue
*/
+void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
+ struct rpc_wait_queue *queue,
+ struct rpc_task *task)
+{
+ spin_lock_bh(&queue->lock);
+ rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
+ spin_unlock_bh(&queue->lock);
+}
+
+/*
+ * Wake up a task on a specific queue
+ */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
spin_lock_bh(&queue->lock);
@@ -1092,12 +1104,12 @@ static int rpciod_start(void)
* Create the rpciod thread and wait for it to start.
*/
dprintk("RPC: creating workqueue rpciod\n");
- wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
+ wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
if (!wq)
goto out_failed;
rpciod_workqueue = wq;
/* Note: highpri because network receive is latency sensitive */
- wq = alloc_workqueue("xprtiod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
if (!wq)
goto free_rpciod;
xprtiod_workqueue = wq;
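Adding WQ_UNBOUND lets rpciod and xprtiod work items run on any CPU's unbound worker pool instead of queueing behind whatever is already executing on the submitting CPU, which matters when RPC work is long-running or that CPU is saturated; WQ_MEM_RECLAIM stays because these queues must make forward progress under memory pressure (NFS writeback depends on them). A minimal sketch with an illustrative queue name:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq_create(void)
{
	/* WQ_UNBOUND: not tied to the submitting CPU; WQ_MEM_RECLAIM:
	 * a dedicated rescuer thread guarantees progress in reclaim.
	 */
	return alloc_workqueue("example_io", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
}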
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 5570719e4787..943f2a745cd5 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -384,25 +384,11 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
unsigned int rcv)
{
-#if 0
- mm_segment_t oldfs;
- oldfs = get_fs(); set_fs(KERNEL_DS);
- sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
- (char*)&snd, sizeof(snd));
- sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
- (char*)&rcv, sizeof(rcv));
-#else
- /* sock_setsockopt limits use to sysctl_?mem_max,
- * which isn't acceptable. Until that is made conditional
- * on not having CAP_SYS_RESOURCE or similar, we go direct...
- * DaveM said I could!
- */
lock_sock(sock->sk);
sock->sk->sk_sndbuf = snd * 2;
sock->sk->sk_rcvbuf = rcv * 2;
sock->sk->sk_write_space(sock->sk);
release_sock(sock->sk);
-#endif
}
static int svc_sock_secure_port(struct svc_rqst *rqstp)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 2436fd1125fc..8f0ad4f268da 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -517,7 +517,8 @@ void xprt_write_space(struct rpc_xprt *xprt)
if (xprt->snd_task) {
dprintk("RPC: write space: waking waiting task on "
"xprt %p\n", xprt);
- rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
+ rpc_wake_up_queued_task_on_wq(xprtiod_workqueue,
+ &xprt->pending, xprt->snd_task);
}
spin_unlock_bh(&xprt->transport_lock);
}
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 162e5dd82466..f0855a959a27 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -143,7 +143,7 @@ static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
if (xdr->page_len) {
remaining = xdr->page_len;
offset = offset_in_page(xdr->page_base);
- count = 0;
+ count = RPCRDMA_MIN_SEND_SGES;
while (remaining) {
remaining -= min_t(unsigned int,
PAGE_SIZE - offset, remaining);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index af7893501e40..a73632ca9048 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -95,7 +95,6 @@ out_shortreply:
out_notfound:
dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
xprt, be32_to_cpu(xid));
-
goto out_unlock;
}
@@ -129,10 +128,6 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
if (ret < 0)
goto out_err;
- ret = svc_rdma_repost_recv(rdma, GFP_NOIO);
- if (ret)
- goto out_err;
-
/* Bump page refcnt so Send completion doesn't release
* the rq_buffer before all retransmits are complete.
*/
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index ad4bd62eebf1..19e9c6b33042 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -400,10 +400,6 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
struct page *page;
int ret;
- ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
- if (ret)
- return;
-
page = alloc_page(GFP_KERNEL);
if (!page)
return;
@@ -554,8 +550,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
&rqstp->rq_arg);
svc_rdma_put_context(ctxt, 0);
- if (ret)
- goto repost;
return ret;
}
@@ -590,6 +584,5 @@ out_postfail:
out_drop:
svc_rdma_put_context(ctxt, 1);
-repost:
- return svc_rdma_repost_recv(rdma_xprt, GFP_KERNEL);
+ return 0;
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 9bd04549a1ad..12b9a7e0b6d2 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -727,12 +727,16 @@ static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
head->arg.head[0].iov_len - info->ri_position;
head->arg.head[0].iov_len = info->ri_position;
- /* Read chunk may need XDR roundup (see RFC 5666, s. 3.7).
+ /* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
*
- * NFSv2/3 write decoders need the length of the tail to
- * contain the size of the roundup padding.
+ * If the client already rounded up the chunk length, the
+ * length does not change. Otherwise, the length of the page
+ * list is increased to include XDR round-up.
+ *
+ * Currently these chunks always start at page offset 0,
+ * thus the rounded-up length never crosses a page boundary.
*/
- head->arg.tail[0].iov_len += 4 - (info->ri_chunklen & 3);
+ info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;
head->arg.page_len = info->ri_chunklen;
head->arg.len += info->ri_chunklen;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 7c3a211e0e9a..649441d5087d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -674,9 +674,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
}
- ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
- if (ret)
- goto err1;
ret = svc_rdma_send_reply_msg(rdma, rdma_argp, rdma_resp, rqstp,
wr_lst, rp_ch);
if (ret < 0)
@@ -687,9 +684,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
if (ret != -E2BIG && ret != -EINVAL)
goto err1;
- ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
- if (ret)
- goto err1;
ret = svc_rdma_send_error_msg(rdma, rdma_resp, rqstp);
if (ret < 0)
goto err0;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 46ec069150d5..9ad12a215b51 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -58,6 +58,7 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
+static int svc_rdma_post_recv(struct svcxprt_rdma *xprt);
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
struct net *net,
@@ -320,6 +321,8 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
spin_unlock(&xprt->sc_rq_dto_lock);
+ svc_rdma_post_recv(xprt);
+
set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
goto out;
@@ -404,7 +407,8 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
return cma_xprt;
}
-int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
+static int
+svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
struct ib_recv_wr recv_wr, *bad_recv_wr;
struct svc_rdma_op_ctxt *ctxt;
@@ -423,7 +427,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
pr_err("svcrdma: Too many sges (%d)\n", sge_no);
goto err_put_ctxt;
}
- page = alloc_page(flags);
+ page = alloc_page(GFP_KERNEL);
if (!page)
goto err_put_ctxt;
ctxt->pages[sge_no] = page;
@@ -459,21 +463,6 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
return -ENOMEM;
}
-int svc_rdma_repost_recv(struct svcxprt_rdma *xprt, gfp_t flags)
-{
- int ret = 0;
-
- ret = svc_rdma_post_recv(xprt, flags);
- if (ret) {
- pr_err("svcrdma: could not post a receive buffer, err=%d.\n",
- ret);
- pr_err("svcrdma: closing transport %p.\n", xprt);
- set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
- ret = -ENOTCONN;
- }
- return ret;
-}
-
static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
struct rdma_conn_param *param)
@@ -833,7 +822,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
/* Post receive buffers */
for (i = 0; i < newxprt->sc_max_requests; i++) {
- ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
+ ret = svc_rdma_post_recv(newxprt);
if (ret) {
dprintk("svcrdma: failure posting receive buffers\n");
goto errout;
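The svcrdma hunks consolidate receive-buffer replenishment: instead of the send, error, and backchannel paths each calling svc_rdma_repost_recv(), the receive completion handler posts a replacement the moment a buffer is consumed, so the receive queue is self-replenishing and svc_rdma_post_recv() can lose both its export and its gfp_t parameter (svcrdma polls its CQs in process context here, so GFP_KERNEL is safe). A skeleton of the pattern with hypothetical names:

static void example_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_xprt *xprt = cq->cq_context;

	/* ... move the completed buffer onto the dispatch queue ... */

	example_post_recv(xprt);	/* replace the consumed buffer */
}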
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index f4eb63e8e689..e6f84a6434a0 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -505,7 +505,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
return -ENOMEM;
}
- ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES;
+ ia->ri_max_send_sges = max_sge;
if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
dprintk("RPC: %s: insufficient wqe's available\n",
@@ -1502,6 +1502,9 @@ __rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
static void
rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
+ if (!rb)
+ return;
+
if (!rpcrdma_regbuf_is_mapped(rb))
return;
@@ -1517,9 +1520,6 @@ rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
void
rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
{
- if (!rb)
- return;
-
rpcrdma_dma_unmap_regbuf(rb);
kfree(rb);
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 18803021f242..a6b8c1f8f92a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -807,13 +807,6 @@ static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
smp_mb__after_atomic();
}
-static void xs_sock_mark_closed(struct rpc_xprt *xprt)
-{
- xs_sock_reset_connection_flags(xprt);
- /* Mark transport as closed and wake up all pending tasks */
- xprt_disconnect_done(xprt);
-}
-
/**
* xs_error_report - callback to handle TCP socket state errors
* @sk: socket
@@ -833,9 +826,6 @@ static void xs_error_report(struct sock *sk)
err = -sk->sk_err;
if (err == 0)
goto out;
- /* Is this a reset event? */
- if (sk->sk_state == TCP_CLOSE)
- xs_sock_mark_closed(xprt);
dprintk("RPC: xs_error_report client %p, error=%d...\n",
xprt, -err);
trace_rpc_socket_error(xprt, sk->sk_socket, err);
@@ -1078,18 +1068,18 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
/* Suck it into the iovec, verify checksum if not done by hw. */
if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
- __UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
spin_lock(&xprt->recv_lock);
+ __UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
goto out_unpin;
}
- __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
spin_lock_bh(&xprt->transport_lock);
xprt_adjust_cwnd(xprt, task, copied);
spin_unlock_bh(&xprt->transport_lock);
spin_lock(&xprt->recv_lock);
xprt_complete_rqst(task, copied);
+ __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
out_unpin:
xprt_unpin_rqst(rovr);
out_unlock:
@@ -1655,9 +1645,11 @@ static void xs_tcp_state_change(struct sock *sk)
if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
&transport->sock_state))
xprt_clear_connecting(xprt);
+ clear_bit(XPRT_CLOSING, &xprt->state);
if (sk->sk_err)
xprt_wake_pending_tasks(xprt, -sk->sk_err);
- xs_sock_mark_closed(xprt);
+ /* Trigger the socket release */
+ xs_tcp_force_close(xprt);
}
out:
read_unlock_bh(&sk->sk_callback_lock);
@@ -2265,14 +2257,19 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
struct socket *sock = transport->sock;
+ int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE;
if (sock == NULL)
return;
- if (xprt_connected(xprt)) {
+ switch (skst) {
+ default:
kernel_sock_shutdown(sock, SHUT_RDWR);
trace_rpc_socket_shutdown(xprt, sock);
- } else
+ break;
+ case TCP_CLOSE:
+ case TCP_TIME_WAIT:
xs_reset_transport(transport);
+ }
}
static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 55d8ba92291d..4e1c6f6450bb 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -208,8 +208,8 @@ bool tipc_msg_validate(struct sk_buff **_skb)
int msz, hsz;
/* Ensure that flow control ratio condition is satisfied */
- if (unlikely(skb->truesize / buf_roundup_len(skb) > 4)) {
- skb = skb_copy(skb, GFP_ATOMIC);
+ if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
+ skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
if (!skb)
return false;
kfree_skb(*_skb);
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 736719c8314e..b0d5fcea47e7 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -484,6 +484,8 @@ out:
static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
.name = "tls",
+ .uid = TCP_ULP_TLS,
+ .user_visible = true,
.owner = THIS_MODULE,
.init = tls_init,
};
diff --git a/scripts/coccinelle/free/devm_free.cocci b/scripts/coccinelle/free/devm_free.cocci
index c990d2c7ee16..b2a2cf8bf81f 100644
--- a/scripts/coccinelle/free/devm_free.cocci
+++ b/scripts/coccinelle/free/devm_free.cocci
@@ -56,9 +56,62 @@ expression x;
x = devm_ioport_map(...)
)
+@safe depends on context || org || report exists@
+expression x;
+position p;
+@@
+
+(
+ x = kmalloc(...)
+|
+ x = kvasprintf(...)
+|
+ x = kasprintf(...)
+|
+ x = kzalloc(...)
+|
+ x = kmalloc_array(...)
+|
+ x = kcalloc(...)
+|
+ x = kstrdup(...)
+|
+ x = kmemdup(...)
+|
+ x = get_free_pages(...)
+|
+ x = request_irq(...)
+|
+ x = ioremap(...)
+|
+ x = ioremap_nocache(...)
+|
+ x = ioport_map(...)
+)
+...
+(
+ kfree@p(x)
+|
+ kzfree@p(x)
+|
+ __krealloc@p(x, ...)
+|
+ krealloc@p(x, ...)
+|
+ free_pages@p(x, ...)
+|
+ free_page@p(x)
+|
+ free_irq@p(x)
+|
+ iounmap@p(x)
+|
+ ioport_unmap@p(x)
+)
+
@pb@
expression r.x;
-position p;
+position p != safe.p;
@@
(
diff --git a/scripts/coccinelle/null/deref_null.cocci b/scripts/coccinelle/null/deref_null.cocci
index f192d6035d02..b16ccb7663a7 100644
--- a/scripts/coccinelle/null/deref_null.cocci
+++ b/scripts/coccinelle/null/deref_null.cocci
@@ -212,7 +212,7 @@ else S3
// The following three rules are duplicates of ifm, pr1 and pr2 respectively.
// It is needed because the previous rule has already made a "change".
-@ifm1@
+@ifm1 depends on context && !org && !report@
expression *E;
statement S1,S2;
position p1;
@@ -220,7 +220,7 @@ position p1;
if@p1 ((E == NULL && ...) || ...) S1 else S2
-@pr11 expression@
+@pr11 depends on context && !org && !report expression@
expression *ifm1.E;
identifier f;
position p1;
@@ -228,7 +228,7 @@ position p1;
(E != NULL && ...) ? <+...E->f@p1...+> : ...
-@pr12 expression@
+@pr12 depends on context && !org && !report expression@
expression *ifm1.E;
identifier f;
position p2;
diff --git a/scripts/gcc-plugins/gcc-common.h b/scripts/gcc-plugins/gcc-common.h
index ffd1dfaa1cc1..f46750053377 100644
--- a/scripts/gcc-plugins/gcc-common.h
+++ b/scripts/gcc-plugins/gcc-common.h
@@ -97,6 +97,10 @@
#include "predict.h"
#include "ipa-utils.h"
+#if BUILDING_GCC_VERSION >= 8000
+#include "stringpool.h"
+#endif
+
#if BUILDING_GCC_VERSION >= 4009
#include "attribs.h"
#include "varasm.h"
diff --git a/scripts/gcc-plugins/latent_entropy_plugin.c b/scripts/gcc-plugins/latent_entropy_plugin.c
index 65264960910d..cbe1d6c4b1a5 100644
--- a/scripts/gcc-plugins/latent_entropy_plugin.c
+++ b/scripts/gcc-plugins/latent_entropy_plugin.c
@@ -255,21 +255,14 @@ static tree handle_latent_entropy_attribute(tree *node, tree name,
return NULL_TREE;
}
-static struct attribute_spec latent_entropy_attr = {
- .name = "latent_entropy",
- .min_length = 0,
- .max_length = 0,
- .decl_required = true,
- .type_required = false,
- .function_type_required = false,
- .handler = handle_latent_entropy_attribute,
-#if BUILDING_GCC_VERSION >= 4007
- .affects_type_identity = false
-#endif
-};
+static struct attribute_spec latent_entropy_attr = { };
static void register_attributes(void *event_data __unused, void *data __unused)
{
+ latent_entropy_attr.name = "latent_entropy";
+ latent_entropy_attr.decl_required = true;
+ latent_entropy_attr.handler = handle_latent_entropy_attribute;
+
register_attribute(&latent_entropy_attr);
}
diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
index 0073af326449..c4a345c3715b 100644
--- a/scripts/gcc-plugins/randomize_layout_plugin.c
+++ b/scripts/gcc-plugins/randomize_layout_plugin.c
@@ -580,68 +580,35 @@ static void finish_type(void *event_data, void *data)
return;
}
-static struct attribute_spec randomize_layout_attr = {
- .name = "randomize_layout",
- // related to args
- .min_length = 0,
- .max_length = 0,
- .decl_required = false,
- // need type declaration
- .type_required = true,
- .function_type_required = false,
- .handler = handle_randomize_layout_attr,
-#if BUILDING_GCC_VERSION >= 4007
- .affects_type_identity = true
-#endif
-};
+static struct attribute_spec randomize_layout_attr = { };
+static struct attribute_spec no_randomize_layout_attr = { };
+static struct attribute_spec randomize_considered_attr = { };
+static struct attribute_spec randomize_performed_attr = { };
-static struct attribute_spec no_randomize_layout_attr = {
- .name = "no_randomize_layout",
- // related to args
- .min_length = 0,
- .max_length = 0,
- .decl_required = false,
- // need type declaration
- .type_required = true,
- .function_type_required = false,
- .handler = handle_randomize_layout_attr,
+static void register_attributes(void *event_data, void *data)
+{
+ randomize_layout_attr.name = "randomize_layout";
+ randomize_layout_attr.type_required = true;
+ randomize_layout_attr.handler = handle_randomize_layout_attr;
#if BUILDING_GCC_VERSION >= 4007
- .affects_type_identity = true
+ randomize_layout_attr.affects_type_identity = true;
#endif
-};
-static struct attribute_spec randomize_considered_attr = {
- .name = "randomize_considered",
- // related to args
- .min_length = 0,
- .max_length = 0,
- .decl_required = false,
- // need type declaration
- .type_required = true,
- .function_type_required = false,
- .handler = handle_randomize_considered_attr,
+ no_randomize_layout_attr.name = "no_randomize_layout";
+ no_randomize_layout_attr.type_required = true;
+ no_randomize_layout_attr.handler = handle_randomize_layout_attr;
#if BUILDING_GCC_VERSION >= 4007
- .affects_type_identity = false
+ no_randomize_layout_attr.affects_type_identity = true;
#endif
-};
-static struct attribute_spec randomize_performed_attr = {
- .name = "randomize_performed",
- // related to args
- .min_length = 0,
- .max_length = 0,
- .decl_required = false,
- // need type declaration
- .type_required = true,
- .function_type_required = false,
- .handler = handle_randomize_performed_attr,
-#if BUILDING_GCC_VERSION >= 4007
- .affects_type_identity = false
-#endif
-};
+ randomize_considered_attr.name = "randomize_considered";
+ randomize_considered_attr.type_required = true;
+ randomize_considered_attr.handler = handle_randomize_considered_attr;
+
+ randomize_performed_attr.name = "randomize_performed";
+ randomize_performed_attr.type_required = true;
+ randomize_performed_attr.handler = handle_randomize_performed_attr;
-static void register_attributes(void *event_data, void *data)
-{
register_attribute(&randomize_layout_attr);
register_attribute(&no_randomize_layout_attr);
register_attribute(&randomize_considered_attr);
diff --git a/scripts/gcc-plugins/structleak_plugin.c b/scripts/gcc-plugins/structleak_plugin.c
index 3f8dd4868178..10292f791e99 100644
--- a/scripts/gcc-plugins/structleak_plugin.c
+++ b/scripts/gcc-plugins/structleak_plugin.c
@@ -57,21 +57,16 @@ static tree handle_user_attribute(tree *node, tree name, tree args, int flags, b
return NULL_TREE;
}
-static struct attribute_spec user_attr = {
- .name = "user",
- .min_length = 0,
- .max_length = 0,
- .decl_required = false,
- .type_required = false,
- .function_type_required = false,
- .handler = handle_user_attribute,
-#if BUILDING_GCC_VERSION >= 4007
- .affects_type_identity = true
-#endif
-};
+static struct attribute_spec user_attr = { };
static void register_attributes(void *event_data, void *data)
{
+ user_attr.name = "user";
+ user_attr.handler = handle_user_attribute;
+#if BUILDING_GCC_VERSION >= 4007
+ user_attr.affects_type_identity = true;
+#endif
+
register_attribute(&user_attr);
}
diff --git a/scripts/kconfig/.gitignore b/scripts/kconfig/.gitignore
index 51f1c877b543..a76856e559c0 100644
--- a/scripts/kconfig/.gitignore
+++ b/scripts/kconfig/.gitignore
@@ -1,7 +1,6 @@
#
# Generated files
#
-config*
*.lex.c
*.tab.c
*.tab.h
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 307bc3f7b945..822dc51923d6 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -39,7 +39,6 @@ static enum input_mode input_mode = oldaskconfig;
static int indent = 1;
static int tty_stdio;
-static int valid_stdin = 1;
static int sync_kconfig;
static int conf_cnt;
static char line[PATH_MAX];
@@ -72,21 +71,14 @@ static void strip(char *str)
*p-- = 0;
}
-static void check_stdin(void)
-{
- if (!valid_stdin) {
- printf(_("aborted!\n\n"));
- printf(_("Console input/output is redirected. "));
- printf(_("Run 'make oldconfig' to update configuration.\n\n"));
- exit(1);
- }
-}
-
/* Helper function to facilitate fgets() by Jean Sacren. */
static void xfgets(char *str, int size, FILE *in)
{
if (!fgets(str, size, in))
fprintf(stderr, "\nError in reading or end of file.\n");
+
+ if (!tty_stdio)
+ printf("%s", str);
}
static int conf_askvalue(struct symbol *sym, const char *def)
@@ -113,13 +105,10 @@ static int conf_askvalue(struct symbol *sym, const char *def)
printf("%s\n", def);
return 0;
}
- check_stdin();
/* fall through */
case oldaskconfig:
fflush(stdout);
xfgets(line, sizeof(line), stdin);
- if (!tty_stdio)
- printf("\n");
return 1;
default:
break;
@@ -199,9 +188,7 @@ static int conf_sym(struct menu *menu)
printf("/m");
if (oldval != yes && sym_tristate_within_range(sym, yes))
printf("/y");
- if (menu_has_help(menu))
- printf("/?");
- printf("] ");
+ printf("/?] ");
if (!conf_askvalue(sym, sym_get_string_value(sym)))
return 0;
strip(line);
@@ -303,10 +290,7 @@ static int conf_choice(struct menu *menu)
printf("[1]: 1\n");
goto conf_childs;
}
- printf("[1-%d", cnt);
- if (menu_has_help(menu))
- printf("?");
- printf("]: ");
+ printf("[1-%d?]: ", cnt);
switch (input_mode) {
case oldconfig:
case silentoldconfig:
@@ -315,7 +299,6 @@ static int conf_choice(struct menu *menu)
printf("%d\n", cnt);
break;
}
- check_stdin();
/* fall through */
case oldaskconfig:
fflush(stdout);
@@ -508,7 +491,7 @@ int main(int ac, char **av)
bindtextdomain(PACKAGE, LOCALEDIR);
textdomain(PACKAGE);
- tty_stdio = isatty(0) && isatty(1) && isatty(2);
+ tty_stdio = isatty(0) && isatty(1);
while ((opt = getopt_long(ac, av, "s", long_opts, NULL)) != -1) {
if (opt == 's') {
@@ -565,7 +548,7 @@ int main(int ac, char **av)
}
}
if (ac == optind) {
- printf(_("%s: Kconfig file missing\n"), av[0]);
+ fprintf(stderr, _("%s: Kconfig file missing\n"), av[0]);
conf_usage(progname);
exit(1);
}
@@ -590,9 +573,11 @@ int main(int ac, char **av)
if (!defconfig_file)
defconfig_file = conf_get_default_confname();
if (conf_read(defconfig_file)) {
- printf(_("***\n"
- "*** Can't find default configuration \"%s\"!\n"
- "***\n"), defconfig_file);
+ fprintf(stderr,
+ _("***\n"
+ "*** Can't find default configuration \"%s\"!\n"
+ "***\n"),
+ defconfig_file);
exit(1);
}
break;
@@ -650,7 +635,6 @@ int main(int ac, char **av)
return 1;
}
}
- valid_stdin = tty_stdio;
}
switch (input_mode) {
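
With check_stdin() gone, conf no longer aborts when its standard streams are redirected; instead xfgets() echoes whatever it read whenever stdin is not a terminal, so a scripted run such as `make oldconfig < answers` still leaves a readable transcript. A minimal standalone sketch of the same echo-on-redirect idea (the in-tree version caches the isatty() result in tty_stdio at startup):

#include <stdio.h>
#include <unistd.h>

static void xfgets_sketch(char *str, int size, FILE *in)
{
        if (!fgets(str, size, in))
                fprintf(stderr, "\nError in reading or end of file.\n");

        /* input came from a pipe or file: echo it for the transcript */
        if (!isatty(fileno(stdin)))
                printf("%s", str);
}
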
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index f7927391de30..5c12dc91ef34 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -201,7 +201,7 @@ static int add_byte(int c, char **lineptr, size_t slen, size_t *n)
if (new_size > *n) {
new_size += LINE_GROWTH - 1;
new_size *= 2;
- nline = realloc(*lineptr, new_size);
+ nline = xrealloc(*lineptr, new_size);
if (!nline)
return -1;
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index 2ba332b3fed7..d45381986ac7 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -94,7 +94,7 @@ struct expr *expr_copy(const struct expr *org)
e->right.expr = expr_copy(org->right.expr);
break;
default:
- printf("can't copy type %d\n", e->type);
+ fprintf(stderr, "can't copy type %d\n", e->type);
free(e);
e = NULL;
break;
@@ -127,7 +127,7 @@ void expr_free(struct expr *e)
expr_free(e->right.expr);
break;
default:
- printf("how to free type %d?\n", e->type);
+ fprintf(stderr, "how to free type %d?\n", e->type);
break;
}
free(e);
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index 16cb62b92650..4e23febbe4b2 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -114,6 +114,7 @@ struct file *file_lookup(const char *name);
int file_write_dep(const char *name);
void *xmalloc(size_t size);
void *xcalloc(size_t nmemb, size_t size);
+void *xrealloc(void *p, size_t size);
struct gstr {
size_t len;
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index 5d86e2dfae59..9dc8abfb1dc3 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -31,7 +31,7 @@ extern struct symbol * symbol_hash[SYMBOL_HASHSIZE];
struct symbol * sym_lookup(const char *name, int flags);
struct symbol * sym_find(const char *name);
-const char * sym_expand_string_value(const char *in);
+char *sym_expand_string_value(const char *in);
const char * sym_escape_string_value(const char *in);
struct symbol ** sym_re_search(const char *pattern);
const char * sym_type_name(enum symbol_type type);
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index a64b1c31253e..88874acfda36 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -6,6 +6,7 @@
*
*/
#include "nconf.h"
+#include "lkc.h"
/* a list of all the different widgets we use */
attributes_t attributes[ATTR_MAX+1] = {0};
@@ -374,7 +375,7 @@ int dialog_inputbox(WINDOW *main_window,
if (strlen(init)+1 > *result_len) {
*result_len = strlen(init)+1;
- *resultp = result = realloc(result, *result_len);
+ *resultp = result = xrealloc(result, *result_len);
}
/* find the widest line of msg: */
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index c9123ed2b791..cca9663be5dd 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -371,11 +371,13 @@ void sym_calc_value(struct symbol *sym)
sym->curr.tri = no;
return;
}
- if (!sym_is_choice_value(sym))
- sym->flags &= ~SYMBOL_WRITE;
+ sym->flags &= ~SYMBOL_WRITE;
sym_calc_visibility(sym);
+ if (sym->visible != no)
+ sym->flags |= SYMBOL_WRITE;
+
/* set default if recursively called */
sym->curr = newval;
@@ -390,7 +392,6 @@ void sym_calc_value(struct symbol *sym)
/* if the symbol is visible use the user value
* if available, otherwise try the default value
*/
- sym->flags |= SYMBOL_WRITE;
if (sym_has_value(sym)) {
newval.tri = EXPR_AND(sym->def[S_DEF_USER].tri,
sym->visible);
@@ -433,12 +434,9 @@ void sym_calc_value(struct symbol *sym)
case S_STRING:
case S_HEX:
case S_INT:
- if (sym->visible != no) {
- sym->flags |= SYMBOL_WRITE;
- if (sym_has_value(sym)) {
- newval.val = sym->def[S_DEF_USER].val;
- break;
- }
+ if (sym->visible != no && sym_has_value(sym)) {
+ newval.val = sym->def[S_DEF_USER].val;
+ break;
}
prop = sym_get_default_prop(sym);
if (prop) {
@@ -901,7 +899,7 @@ struct symbol *sym_find(const char *name)
* name to be expanded shall be prefixed by a '$'. Unknown symbol expands to
* the empty string.
*/
-const char *sym_expand_string_value(const char *in)
+char *sym_expand_string_value(const char *in)
{
const char *src;
char *res;
@@ -938,7 +936,7 @@ const char *sym_expand_string_value(const char *in)
newlen = strlen(res) + strlen(symval) + strlen(src) + 1;
if (newlen > reslen) {
reslen = newlen;
- res = realloc(res, reslen);
+ res = xrealloc(res, reslen);
}
strcat(res, symval);
@@ -1223,7 +1221,7 @@ static struct symbol *sym_check_expr_deps(struct expr *e)
default:
break;
}
- printf("Oops! How to check %d?\n", e->type);
+ fprintf(stderr, "Oops! How to check %d?\n", e->type);
return NULL;
}
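
The signature change from const char * to char * makes the ownership contract explicit: sym_expand_string_value() returns a heap-allocated buffer that the caller must free, without the (void *) cast that util.c used to carry. A hypothetical caller, assuming the lkc.h declarations updated above:

#include <stdlib.h>
#include "lkc.h"

static void lookup_example(void)
{
        char *expanded = sym_expand_string_value("$SRCARCH/Kconfig");

        /* ... open or compare the expanded path ... */
        free(expanded);         /* plain free(), no cast needed */
}
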
diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c
index 0e76042473cc..b98a79e30e04 100644
--- a/scripts/kconfig/util.c
+++ b/scripts/kconfig/util.c
@@ -14,11 +14,11 @@
struct file *file_lookup(const char *name)
{
struct file *file;
- const char *file_name = sym_expand_string_value(name);
+ char *file_name = sym_expand_string_value(name);
for (file = file_list; file; file = file->next) {
if (!strcmp(name, file->name)) {
- free((void *)file_name);
+ free(file_name);
return file;
}
}
@@ -104,7 +104,7 @@ void str_append(struct gstr *gs, const char *s)
if (s) {
l = strlen(gs->s) + strlen(s) + 1;
if (l > gs->len) {
- gs->s = realloc(gs->s, l);
+ gs->s = xrealloc(gs->s, l);
gs->len = l;
}
strcat(gs->s, s);
@@ -145,3 +145,12 @@ void *xcalloc(size_t nmemb, size_t size)
fprintf(stderr, "Out of memory.\n");
exit(1);
}
+
+void *xrealloc(void *p, size_t size)
+{
+ p = realloc(p, size);
+ if (p)
+ return p;
+ fprintf(stderr, "Out of memory.\n");
+ exit(1);
+}
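
xrealloc() follows the same die-on-failure convention as the existing xmalloc()/xcalloc(): allocation failure terminates the program, so converted callers never see NULL (the `if (!nline)` branch left in confdata.c above is now effectively dead code). A sketch of the caller-side simplification this buys:

#include <stddef.h>

void *xrealloc(void *p, size_t size);   /* declared in lkc.h above */

static char *grow_buffer(char *buf, size_t *len)
{
        *len *= 2;
        return xrealloc(buf, *len);     /* never returns NULL */
}
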
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
index 07e074dc68a1..02de6fe302a9 100644
--- a/scripts/kconfig/zconf.l
+++ b/scripts/kconfig/zconf.l
@@ -52,7 +52,7 @@ static void append_string(const char *str, int size)
if (new_size > text_asize) {
new_size += START_STRSIZE - 1;
new_size &= -START_STRSIZE;
- text = realloc(text, new_size);
+ text = xrealloc(text, new_size);
text_asize = new_size;
}
memcpy(text + text_size, str, size);
@@ -184,7 +184,9 @@ n [A-Za-z0-9_-]
append_string(yytext, 1);
}
\n {
- printf("%s:%d:warning: multi-line strings not supported\n", zconf_curname(), zconf_lineno());
+ fprintf(stderr,
+ "%s:%d:warning: multi-line strings not supported\n",
+ zconf_curname(), zconf_lineno());
current_file->lineno++;
BEGIN(INITIAL);
return T_EOL;
@@ -294,7 +296,7 @@ void zconf_initscan(const char *name)
{
yyin = zconf_fopen(name);
if (!yyin) {
- printf("can't find file %s\n", name);
+ fprintf(stderr, "can't find file %s\n", name);
exit(1);
}
@@ -315,8 +317,8 @@ void zconf_nextfile(const char *name)
current_buf->state = YY_CURRENT_BUFFER;
yyin = zconf_fopen(file->name);
if (!yyin) {
- printf("%s:%d: can't open file \"%s\"\n",
- zconf_curname(), zconf_lineno(), file->name);
+ fprintf(stderr, "%s:%d: can't open file \"%s\"\n",
+ zconf_curname(), zconf_lineno(), file->name);
exit(1);
}
yy_switch_to_buffer(yy_create_buffer(yyin, YY_BUF_SIZE));
@@ -325,20 +327,21 @@ void zconf_nextfile(const char *name)
for (iter = current_file->parent; iter; iter = iter->parent ) {
if (!strcmp(current_file->name,iter->name) ) {
- printf("%s:%d: recursive inclusion detected. "
- "Inclusion path:\n current file : '%s'\n",
- zconf_curname(), zconf_lineno(),
- zconf_curname());
+ fprintf(stderr,
+ "%s:%d: recursive inclusion detected. "
+ "Inclusion path:\n current file : '%s'\n",
+ zconf_curname(), zconf_lineno(),
+ zconf_curname());
iter = current_file->parent;
while (iter && \
strcmp(iter->name,current_file->name)) {
- printf(" included from: '%s:%d'\n",
- iter->name, iter->lineno-1);
+ fprintf(stderr, " included from: '%s:%d'\n",
+ iter->name, iter->lineno-1);
iter = iter->parent;
}
if (iter)
- printf(" included from: '%s:%d'\n",
- iter->name, iter->lineno+1);
+ fprintf(stderr, " included from: '%s:%d'\n",
+ iter->name, iter->lineno+1);
exit(1);
}
}
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 21ce883e5d9e..4be98050b961 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -436,6 +436,12 @@ help: help_start T_HELPTEXT
zconfprint("warning: '%s' defined with more than one help text -- only the last one will be used",
current_entry->sym->name ?: "<choice>");
}
+
+ /* Is the help text empty or all whitespace? */
+ if ($2[strspn($2, " \f\n\r\t\v")] == '\0')
+ zconfprint("warning: '%s' defined with blank help text",
+ current_entry->sym->name ?: "<choice>");
+
current_entry->help = $2;
};
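
strspn(s, set) returns the length of the initial run of characters of s drawn from set, so indexing s at that offset lands on the first character outside the set; if that character is the terminating NUL, the whole string was whitespace. A self-contained illustration of the idiom behind the new warning:

#include <stdbool.h>
#include <string.h>

static bool is_blank(const char *s)
{
        return s[strspn(s, " \f\n\r\t\v")] == '\0';
}

/*
 * is_blank("")      -> true
 * is_blank(" \t\n") -> true
 * is_blank("  x ")  -> false
 */
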
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index c462a928e03d..a9d47c1558bb 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -23,7 +23,12 @@ DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))
man: man8
man8: $(DOC_MAN8)
+RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
+
$(OUTPUT)%.8: %.rst
+ifndef RST2MAN_DEP
+ $(error "rst2man not found, but required to generate man pages")
+endif
$(QUIET_GEN)rst2man $< > $@
clean:
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index 2fe2a1bdbe3e..0e4e923235b6 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -26,8 +26,8 @@ MAP COMMANDS
| **bpftool** **cgroup help**
|
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
-| *ATTACH_TYPE* := { *ingress* | *egress* | *sock_create* | *sock_ops* | *device* }
-| *ATTACH_FLAGS* := { *multi* | *override* }
+| *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** }
+| *ATTACH_FLAGS* := { **multi** | **override** }
DESCRIPTION
===========
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 0ab32b312aec..457e868bd32f 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -31,7 +31,8 @@ MAP COMMANDS
| **bpftool** **map help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
-| *VALUE* := { *BYTES* | *MAP* | *PROGRAM* }
+| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+| *VALUE* := { *BYTES* | *MAP* | *PROG* }
| *UPDATE_FLAGS* := { **any** | **exist** | **noexist** }
DESCRIPTION
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 0137866bb8f6..08719c54a614 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -52,16 +52,24 @@ _bpftool_once_attr()
done
}
-# Takes a list of words in argument; adds them all to COMPREPLY if none of them
-# is already present on the command line. Returns no value.
-_bpftool_one_of_list()
+# Takes a list of words as arguments; if any of those words is present on the
+# command line, return 0. Otherwise, return 1.
+_bpftool_search_list()
{
local w idx
for w in $*; do
for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
- [[ $w == ${words[idx]} ]] && return 1
+ [[ $w == ${words[idx]} ]] && return 0
done
done
+ return 1
+}
+
+# Takes a list of words in argument; adds them all to COMPREPLY if none of them
+# is already present on the command line. Returns no value.
+_bpftool_one_of_list()
+{
+ _bpftool_search_list $* && return 1
COMPREPLY+=( $( compgen -W "$*" -- "$cur" ) )
}
@@ -230,10 +238,14 @@ _bpftool()
fi
return 0
;;
+ load)
+ _filedir
+ return 0
+ ;;
*)
[[ $prev == $object ]] && \
- COMPREPLY=( $( compgen -W 'dump help pin show list' -- \
- "$cur" ) )
+ COMPREPLY=( $( compgen -W 'dump help pin load \
+ show list' -- "$cur" ) )
;;
esac
;;
@@ -347,6 +359,54 @@ _bpftool()
;;
esac
;;
+ cgroup)
+ case $command in
+ show|list)
+ _filedir
+ return 0
+ ;;
+ attach|detach)
+ local ATTACH_TYPES='ingress egress sock_create sock_ops \
+ device'
+ local ATTACH_FLAGS='multi override'
+ local PROG_TYPE='id pinned tag'
+ case $prev in
+ $command)
+ _filedir
+ return 0
+ ;;
+ ingress|egress|sock_create|sock_ops|device)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
+ "$cur" ) )
+ return 0
+ ;;
+ id)
+ _bpftool_get_prog_ids
+ return 0
+ ;;
+ *)
+ if ! _bpftool_search_list "$ATTACH_TYPES"; then
+ COMPREPLY=( $( compgen -W "$ATTACH_TYPES" -- \
+ "$cur" ) )
+ elif [[ "$command" == "attach" ]]; then
+ # We have an attach type on the command line,
+ # but it is not the previous word, or
+ # "id|pinned|tag" (we already checked for
+ # that). This should only leave the case when
+                            # we need attach flags for "attach" command.
+ _bpftool_one_of_list "$ATTACH_FLAGS"
+ fi
+ return 0
+ ;;
+ esac
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help attach detach \
+ show list' -- "$cur" ) )
+ ;;
+ esac
+ ;;
esac
} &&
complete -F _bpftool bpftool
diff --git a/tools/include/uapi/linux/bpf_common.h b/tools/include/uapi/linux/bpf_common.h
index 18be90725ab0..ee97668bdadb 100644
--- a/tools/include/uapi/linux/bpf_common.h
+++ b/tools/include/uapi/linux/bpf_common.h
@@ -15,9 +15,10 @@
/* ld/ldx fields */
#define BPF_SIZE(code) ((code) & 0x18)
-#define BPF_W 0x00
-#define BPF_H 0x08
-#define BPF_B 0x10
+#define BPF_W 0x00 /* 32-bit */
+#define BPF_H 0x08 /* 16-bit */
+#define BPF_B 0x10 /* 8-bit */
+/* eBPF BPF_DW 0x18 64-bit */
#define BPF_MODE(code) ((code) & 0xe0)
#define BPF_IMM 0x00
#define BPF_ABS 0x20
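
The size field sits in bits 3-4 of the opcode: classic BPF defines W/H/B, and eBPF reuses the remaining encoding 0x18 as BPF_DW for 64-bit accesses, which is why the new comments leave DW as a note rather than a define (this header is shared with classic BPF). A small decoder, assuming those four values:

#include <stdio.h>

#define BPF_SIZE(code)  ((code) & 0x18)

static const char *bpf_size_str(unsigned char code)
{
        switch (BPF_SIZE(code)) {
        case 0x00: return "W (32-bit)";
        case 0x08: return "H (16-bit)";
        case 0x10: return "B (8-bit)";
        case 0x18: return "DW (64-bit, eBPF only)";
        }
        return "?";     /* unreachable: all four encodings covered */
}
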
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 71ddc481f349..97073d649c1a 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -319,8 +319,8 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
prog->section_name = strdup(section_name);
if (!prog->section_name) {
- pr_warning("failed to alloc name for prog under section %s\n",
- section_name);
+ pr_warning("failed to alloc name for prog under section(%d) %s\n",
+ idx, section_name);
goto errout;
}
@@ -742,6 +742,24 @@ bpf_object__init_maps(struct bpf_object *obj)
return 0;
}
+static bool section_have_execinstr(struct bpf_object *obj, int idx)
+{
+ Elf_Scn *scn;
+ GElf_Shdr sh;
+
+ scn = elf_getscn(obj->efile.elf, idx);
+ if (!scn)
+ return false;
+
+ if (gelf_getshdr(scn, &sh) != &sh)
+ return false;
+
+ if (sh.sh_flags & SHF_EXECINSTR)
+ return true;
+
+ return false;
+}
+
static int bpf_object__elf_collect(struct bpf_object *obj)
{
Elf *elf = obj->efile.elf;
@@ -763,29 +781,29 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
idx++;
if (gelf_getshdr(scn, &sh) != &sh) {
- pr_warning("failed to get section header from %s\n",
- obj->path);
+ pr_warning("failed to get section(%d) header from %s\n",
+ idx, obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto out;
}
name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
if (!name) {
- pr_warning("failed to get section name from %s\n",
- obj->path);
+ pr_warning("failed to get section(%d) name from %s\n",
+ idx, obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto out;
}
data = elf_getdata(scn, 0);
if (!data) {
- pr_warning("failed to get section data from %s(%s)\n",
- name, obj->path);
+ pr_warning("failed to get section(%d) data from %s(%s)\n",
+ idx, name, obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto out;
}
- pr_debug("section %s, size %ld, link %d, flags %lx, type=%d\n",
- name, (unsigned long)data->d_size,
+ pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
+ idx, name, (unsigned long)data->d_size,
(int)sh.sh_link, (unsigned long)sh.sh_flags,
(int)sh.sh_type);
@@ -825,6 +843,14 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (sh.sh_type == SHT_REL) {
void *reloc = obj->efile.reloc;
int nr_reloc = obj->efile.nr_reloc + 1;
+ int sec = sh.sh_info; /* points to other section */
+
+ /* Only do relo for section with exec instructions */
+ if (!section_have_execinstr(obj, sec)) {
+ pr_debug("skip relo %s(%d) for section(%d)\n",
+ name, idx, sec);
+ continue;
+ }
reloc = realloc(reloc,
sizeof(*obj->efile.reloc) * nr_reloc);
@@ -840,6 +866,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
obj->efile.reloc[n].shdr = sh;
obj->efile.reloc[n].data = data;
}
+ } else {
+ pr_debug("skip section(%d) %s\n", idx, name);
}
if (err)
goto out;
@@ -1119,8 +1147,7 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
prog = bpf_object__find_prog_by_idx(obj, idx);
if (!prog) {
- pr_warning("relocation failed: no %d section\n",
- idx);
+ pr_warning("relocation failed: no section(%d)\n", idx);
return -LIBBPF_ERRNO__RELOC;
}
@@ -1816,12 +1843,17 @@ static const struct {
BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
BPF_PROG_SEC("kprobe/", BPF_PROG_TYPE_KPROBE),
BPF_PROG_SEC("kretprobe/", BPF_PROG_TYPE_KPROBE),
+ BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
+ BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
BPF_PROG_SEC("tracepoint/", BPF_PROG_TYPE_TRACEPOINT),
BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
BPF_PROG_SEC("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
BPF_PROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK),
BPF_PROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE),
+ BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
+ BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
+ BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
BPF_PROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS),
BPF_PROG_SEC("sk_skb", BPF_PROG_TYPE_SK_SKB),
};
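
Each BPF_PROG_SEC() entry matches as a prefix of the ELF section name, so "kprobe/" covers "kprobe/sys_open" while plain names such as "classifier" or "xdp" match the bare section. A hedged sketch of that lookup (the struct and helper here are illustrative, not libbpf's exact code; the numeric types mirror enum bpf_prog_type):

#include <string.h>

struct sec_def {
        const char *prefix;
        int prog_type;
};

static const struct sec_def defs[] = {
        { "kprobe/",    2 },    /* BPF_PROG_TYPE_KPROBE */
        { "classifier", 3 },    /* BPF_PROG_TYPE_SCHED_CLS */
        { "xdp",        6 },    /* BPF_PROG_TYPE_XDP */
};

static int prog_type_by_title(const char *title)
{
        size_t i;

        for (i = 0; i < sizeof(defs) / sizeof(defs[0]); i++)
                if (!strncmp(title, defs[i].prefix, strlen(defs[i].prefix)))
                        return defs[i].prog_type;
        return -1;      /* unknown section */
}
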
diff --git a/tools/power/acpi/common/cmfsize.c b/tools/power/acpi/common/cmfsize.c
index 5b38dc2fec4f..dfa6fee1b915 100644
--- a/tools/power/acpi/common/cmfsize.c
+++ b/tools/power/acpi/common/cmfsize.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/common/getopt.c b/tools/power/acpi/common/getopt.c
index 6e78413bb2cb..f7032c9ab35e 100644
--- a/tools/power/acpi/common/getopt.c
+++ b/tools/power/acpi/common/getopt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
index 52a39ecf5ca1..e7347edfd4f9 100644
--- a/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
+++ b/tools/power/acpi/os_specific/service_layers/oslinuxtbl.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixdir.c b/tools/power/acpi/os_specific/service_layers/osunixdir.c
index ea14eaeb268f..9b5e61b636d9 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixdir.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixdir.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixmap.c b/tools/power/acpi/os_specific/service_layers/osunixmap.c
index cf9b5a54df92..8b26924e2602 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixmap.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixmap.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 025c1b07049d..34c044da433c 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/acpidump.h b/tools/power/acpi/tools/acpidump/acpidump.h
index d6aa40fce2b1..3c679607d1c3 100644
--- a/tools/power/acpi/tools/acpidump/acpidump.h
+++ b/tools/power/acpi/tools/acpidump/acpidump.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index 0634449156d8..9ad1712e4cf9 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apfiles.c b/tools/power/acpi/tools/acpidump/apfiles.c
index d686e11936c4..856e1b83407b 100644
--- a/tools/power/acpi/tools/acpidump/apfiles.c
+++ b/tools/power/acpi/tools/acpidump/apfiles.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index be418fba9441..f4ef826800cf 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2017, Intel Corp.
+ * Copyright (C) 2000 - 2018, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 30cd0b296f1a..44ef9eba5a7a 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -153,11 +153,12 @@ void idr_nowait_test(void)
idr_destroy(&idr);
}
-void idr_get_next_test(void)
+void idr_get_next_test(int base)
{
unsigned long i;
int nextid;
DEFINE_IDR(idr);
+ idr_init_base(&idr, base);
int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};
@@ -207,6 +208,7 @@ void idr_checks(void)
assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
}
assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
+ assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);
idr_for_each(&idr, item_idr_free, &idr);
idr_destroy(&idr);
@@ -214,6 +216,23 @@ void idr_checks(void)
assert(idr_is_empty(&idr));
+ idr_set_cursor(&idr, INT_MAX - 3UL);
+ for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
+ struct item *item;
+ unsigned int id;
+ if (i <= INT_MAX)
+ item = item_create(i, 0);
+ else
+ item = item_create(i - INT_MAX - 1, 0);
+
+ id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
+ assert(id == item->index);
+ }
+
+ idr_for_each(&idr, item_idr_free, &idr);
+ idr_destroy(&idr);
+ assert(idr_is_empty(&idr));
+
for (i = 1; i < 10000; i++) {
struct item *item = item_create(i, 0);
assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
@@ -226,7 +245,9 @@ void idr_checks(void)
idr_alloc_test();
idr_null_test();
idr_nowait_test();
- idr_get_next_test();
+ idr_get_next_test(0);
+ idr_get_next_test(1);
+ idr_get_next_test(4);
}
/*
@@ -380,7 +401,7 @@ void ida_check_random(void)
do {
ida_pre_get(&ida, GFP_KERNEL);
err = ida_get_new_above(&ida, bit, &id);
- } while (err == -ENOMEM);
+ } while (err == -EAGAIN);
assert(!err);
assert(id == bit);
}
@@ -489,7 +510,7 @@ static void *ida_random_fn(void *arg)
void ida_thread_tests(void)
{
- pthread_t threads[10];
+ pthread_t threads[20];
int i;
for (i = 0; i < ARRAY_SIZE(threads); i++)
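
idr_init_base() tells the IDR the smallest ID it will ever hold, letting the underlying radix tree store id - base and stay compact for IDRs that, for example, never hand out ID 0; the test now runs idr_get_next_test() with bases 0, 1 and 4 to cover that offsetting. A hedged sketch of the parameter, assuming the in-kernel IDR API this harness wraps:

static int token;

static void base_example(void)
{
        DEFINE_IDR(idr);
        int id;

        idr_init_base(&idr, 1);
        id = idr_alloc(&idr, &token, 1, 0, GFP_KERNEL);
        /* id == 1 on the first allocation; ID 0 is never used */
        idr_remove(&idr, id);
        idr_destroy(&idr);
}
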
diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h
index c3bc3f364f68..426f32f28547 100644
--- a/tools/testing/radix-tree/linux/kernel.h
+++ b/tools/testing/radix-tree/linux/kernel.h
@@ -17,6 +17,4 @@
#define pr_debug printk
#define pr_cont printk
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-
#endif /* _KERNEL_H */
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 566d6adc172a..5c43c187f27c 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -13,6 +13,7 @@ endif
CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
LDLIBS += -lcap -lelf -lrt -lpthread
+# Order corresponds to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user
@@ -22,15 +23,24 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test
test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
sample_map_ret0.o test_tcpbpf_kern.o
-TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh \
+# Order corresponds to 'make run_tests' order
+TEST_PROGS := test_kmod.sh \
+ test_libbpf.sh \
+ test_xdp_redirect.sh \
+ test_xdp_meta.sh \
test_offload.py
+# Compiled, but not part of 'make run_tests'
+TEST_GEN_PROGS_EXTENDED = test_libbpf_open
+
include ../lib.mk
BPFOBJ := $(OUTPUT)/libbpf.a cgroup_helpers.c
$(TEST_GEN_PROGS): $(BPFOBJ)
+$(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/libbpf.a
+
.PHONY: force
# force a rebuild of BPFOBJ when its dependencies are updated
diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh
index ed4774d8d6ed..35669ccd4d23 100755
--- a/tools/testing/selftests/bpf/test_kmod.sh
+++ b/tools/testing/selftests/bpf/test_kmod.sh
@@ -10,9 +10,21 @@ test_run()
echo "[ JIT enabled:$1 hardened:$2 ]"
dmesg -C
- insmod $SRC_TREE/lib/test_bpf.ko 2> /dev/null
- if [ $? -ne 0 ]; then
- rc=1
+ if [ -f ${SRC_TREE}/lib/test_bpf.ko ]; then
+ insmod ${SRC_TREE}/lib/test_bpf.ko 2> /dev/null
+ if [ $? -ne 0 ]; then
+ rc=1
+ fi
+ else
+ # Use modprobe dry run to check for missing test_bpf module
+ if ! /sbin/modprobe -q -n test_bpf; then
+ echo "test_bpf: [SKIP]"
+ elif /sbin/modprobe -q test_bpf; then
+ echo "test_bpf: ok"
+ else
+ echo "test_bpf: [FAIL]"
+ rc=1
+ fi
fi
rmmod test_bpf 2> /dev/null
dmesg | grep FAIL
diff --git a/tools/testing/selftests/bpf/test_libbpf.sh b/tools/testing/selftests/bpf/test_libbpf.sh
new file mode 100755
index 000000000000..d97dc914cd49
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_libbpf.sh
@@ -0,0 +1,49 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+export TESTNAME=test_libbpf
+
+# Determine selftest success via shell exit code
+exit_handler()
+{
+	if [ $? -eq 0 ]; then
+ echo "selftests: $TESTNAME [PASS]";
+ else
+ echo "$TESTNAME: failed at file $LAST_LOADED" 1>&2
+ echo "selftests: $TESTNAME [FAILED]";
+ fi
+}
+
+libbpf_open_file()
+{
+ LAST_LOADED=$1
+ if [ -n "$VERBOSE" ]; then
+ ./test_libbpf_open $1
+ else
+ ./test_libbpf_open --quiet $1
+ fi
+}
+
+# Exit the script immediately (caught by the trap handler) if any
+# program exits with a non-zero status.
+set -e
+
+# (Use 'trap -l' to list meaning of numbers)
+trap exit_handler 0 2 3 6 9
+
+libbpf_open_file test_l4lb.o
+
+# TODO: fix libbpf to load noinline functions
+# [warning] libbpf: incorrect bpf_call opcode
+#libbpf_open_file test_l4lb_noinline.o
+
+# TODO: fix test_xdp_meta.c to load with libbpf
+# [warning] libbpf: test_xdp_meta.o doesn't provide kernel version
+#libbpf_open_file test_xdp_meta.o
+
+# TODO: fix libbpf to handle .eh_frame
+# [warning] libbpf: relocation failed: no section(10)
+#libbpf_open_file ../../../../samples/bpf/tracex3_kern.o
+
+# Success
+exit 0
diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c
new file mode 100644
index 000000000000..8fcd1c076add
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_libbpf_open.c
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018 Jesper Dangaard Brouer, Red Hat Inc.
+ */
+static const char *__doc__ =
+ "Libbpf test program for loading BPF ELF object files";
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <bpf/libbpf.h>
+#include <getopt.h>
+
+static const struct option long_options[] = {
+ {"help", no_argument, NULL, 'h' },
+ {"debug", no_argument, NULL, 'D' },
+ {"quiet", no_argument, NULL, 'q' },
+ {0, 0, NULL, 0 }
+};
+
+static void usage(char *argv[])
+{
+ int i;
+
+ printf("\nDOCUMENTATION:\n%s\n\n", __doc__);
+ printf(" Usage: %s (options-see-below) BPF_FILE\n", argv[0]);
+ printf(" Listing options:\n");
+ for (i = 0; long_options[i].name != 0; i++) {
+ printf(" --%-12s", long_options[i].name);
+ printf(" short-option: -%c",
+ long_options[i].val);
+ printf("\n");
+ }
+ printf("\n");
+}
+
+#define DEFINE_PRINT_FN(name, enabled) \
+static int libbpf_##name(const char *fmt, ...) \
+{ \
+ va_list args; \
+	int ret = 0;					\
+ \
+ va_start(args, fmt); \
+ if (enabled) { \
+ fprintf(stderr, "[" #name "] "); \
+ ret = vfprintf(stderr, fmt, args); \
+ } \
+ va_end(args); \
+ return ret; \
+}
+DEFINE_PRINT_FN(warning, 1)
+DEFINE_PRINT_FN(info, 1)
+DEFINE_PRINT_FN(debug, 1)
+
+#define EXIT_FAIL_LIBBPF EXIT_FAILURE
+#define EXIT_FAIL_OPTION 2
+
+int test_walk_progs(struct bpf_object *obj, bool verbose)
+{
+ struct bpf_program *prog;
+ int cnt = 0;
+
+ bpf_object__for_each_program(prog, obj) {
+ cnt++;
+ if (verbose)
+ printf("Prog (count:%d) section_name: %s\n", cnt,
+ bpf_program__title(prog, false));
+ }
+ return 0;
+}
+
+int test_walk_maps(struct bpf_object *obj, bool verbose)
+{
+ struct bpf_map *map;
+ int cnt = 0;
+
+ bpf_map__for_each(map, obj) {
+ cnt++;
+ if (verbose)
+ printf("Map (count:%d) name: %s\n", cnt,
+ bpf_map__name(map));
+ }
+ return 0;
+}
+
+int test_open_file(char *filename, bool verbose)
+{
+ struct bpf_object *bpfobj = NULL;
+ long err;
+
+ if (verbose)
+ printf("Open BPF ELF-file with libbpf: %s\n", filename);
+
+ /* Load BPF ELF object file and check for errors */
+ bpfobj = bpf_object__open(filename);
+ err = libbpf_get_error(bpfobj);
+ if (err) {
+ char err_buf[128];
+ libbpf_strerror(err, err_buf, sizeof(err_buf));
+ if (verbose)
+ printf("Unable to load eBPF objects in file '%s': %s\n",
+ filename, err_buf);
+ return EXIT_FAIL_LIBBPF;
+ }
+ test_walk_progs(bpfobj, verbose);
+ test_walk_maps(bpfobj, verbose);
+
+ if (verbose)
+ printf("Close BPF ELF-file with libbpf: %s\n",
+ bpf_object__name(bpfobj));
+ bpf_object__close(bpfobj);
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ char filename[1024] = { 0 };
+ bool verbose = 1;
+ int longindex = 0;
+ int opt;
+
+ libbpf_set_print(libbpf_warning, libbpf_info, NULL);
+
+	/* Parse command line args */
+ while ((opt = getopt_long(argc, argv, "hDq",
+ long_options, &longindex)) != -1) {
+ switch (opt) {
+ case 'D':
+ libbpf_set_print(libbpf_warning, libbpf_info,
+ libbpf_debug);
+ break;
+ case 'q': /* Use in scripting mode */
+ verbose = 0;
+ break;
+ case 'h':
+ default:
+ usage(argv);
+ return EXIT_FAIL_OPTION;
+ }
+ }
+ if (optind >= argc) {
+ usage(argv);
+ printf("ERROR: Expected BPF_FILE argument after options\n");
+ return EXIT_FAIL_OPTION;
+ }
+ snprintf(filename, sizeof(filename), "%s", argv[optind]);
+
+ return test_open_file(filename, verbose);
+}
diff --git a/tools/testing/selftests/bpf/test_xdp_meta.sh b/tools/testing/selftests/bpf/test_xdp_meta.sh
index 307aa856cee3..637fcf4fe4e3 100755
--- a/tools/testing/selftests/bpf/test_xdp_meta.sh
+++ b/tools/testing/selftests/bpf/test_xdp_meta.sh
@@ -9,6 +9,7 @@ cleanup()
fi
set +e
+ ip link del veth1 2> /dev/null
ip netns del ns1 2> /dev/null
ip netns del ns2 2> /dev/null
}
diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh
index 344a3656dea6..c4b17e08d431 100755
--- a/tools/testing/selftests/bpf/test_xdp_redirect.sh
+++ b/tools/testing/selftests/bpf/test_xdp_redirect.sh
@@ -19,6 +19,8 @@ cleanup()
fi
set +e
+ ip link del veth1 2> /dev/null
+ ip link del veth2 2> /dev/null
ip netns del ns1 2> /dev/null
ip netns del ns2 2> /dev/null
}
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
index 589d52b211b7..27a54a17da65 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
@@ -29,6 +29,12 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$'
# filter by *, end match
ftrace_filter_check 'schedule*' '^schedule.*$'
+# filter by *mid*end
+ftrace_filter_check '*aw*lock' '.*aw.*lock$'
+
+# filter by start*mid*
+ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
+
# Advanced full-glob matching feature is recently supported.
# Skip the tests if we are sure the kernel does not support it.
if grep -q 'accepts: .* glob-matching-pattern' README ; then
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
index 0f3f92622e33..68e7a48f5828 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
@@ -128,6 +128,43 @@ if check_set_ftrace_filter "$FUNC1" "$FUNC2" ; then
fail "Expected $FUNC1 and $FUNC2"
fi
+test_actual() { # Compares $TMPDIR/expected with set_ftrace_filter
+ cat set_ftrace_filter | grep -v '#' | cut -d' ' -f1 | cut -d':' -f1 | sort -u > $TMPDIR/actual
+ DIFF=`diff $TMPDIR/actual $TMPDIR/expected`
+ test -z "$DIFF"
+}
+
+# Set traceoff trigger for all functions with "lock" in their name
+cat available_filter_functions | cut -d' ' -f1 | grep 'lock' | sort -u > $TMPDIR/expected
+echo '*lock*:traceoff' > set_ftrace_filter
+test_actual
+
+# now remove all with 'try' in it, and end with lock
+grep -v 'try.*lock$' $TMPDIR/expected > $TMPDIR/expected2
+mv $TMPDIR/expected2 $TMPDIR/expected
+echo '!*try*lock:traceoff' >> set_ftrace_filter
+test_actual
+
+# remove all that start with "m" and end with "lock"
+grep -v '^m.*lock$' $TMPDIR/expected > $TMPDIR/expected2
+mv $TMPDIR/expected2 $TMPDIR/expected
+echo '!m*lock:traceoff' >> set_ftrace_filter
+test_actual
+
+# remove all that start with "c" and have "unlock"
+grep -v '^c.*unlock' $TMPDIR/expected > $TMPDIR/expected2
+mv $TMPDIR/expected2 $TMPDIR/expected
+echo '!c*unlock*:traceoff' >> set_ftrace_filter
+test_actual
+
+# clear all the rest
+> $TMPDIR/expected
+echo '!*:traceoff' >> set_ftrace_filter
+test_actual
+
+rm $TMPDIR/expected
+rm $TMPDIR/actual
+
do_reset
exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
index f2019b37370d..df3dd7fe5f9b 100644
--- a/tools/testing/selftests/ftrace/test.d/functions
+++ b/tools/testing/selftests/ftrace/test.d/functions
@@ -37,17 +37,21 @@ reset_ftrace_filter() { # reset all triggers in set_ftrace_filter
if [ "$tr" = "" ]; then
continue
fi
+ if ! grep -q "$t" set_ftrace_filter; then
+ continue;
+ fi
+ name=`echo $t | cut -d: -f1 | cut -d' ' -f1`
if [ $tr = "enable_event" -o $tr = "disable_event" ]; then
- tr=`echo $t | cut -d: -f1-4`
+ tr=`echo $t | cut -d: -f2-4`
limit=`echo $t | cut -d: -f5`
else
- tr=`echo $t | cut -d: -f1-2`
+ tr=`echo $t | cut -d: -f2`
limit=`echo $t | cut -d: -f3`
fi
if [ "$limit" != "unlimited" ]; then
tr="$tr:$limit"
fi
- echo "!$tr" > set_ftrace_filter
+ echo "!$name:$tr" > set_ftrace_filter
done
}
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index 4a8217448f20..cad14cd0ea92 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -21,6 +21,7 @@
#include <sys/epoll.h>
#include <sys/types.h>
#include <sys/socket.h>
+#include <sys/resource.h>
#include <unistd.h>
#ifndef ARRAY_SIZE
@@ -190,11 +191,14 @@ static void send_from(struct test_params p, uint16_t sport, char *buf,
struct sockaddr * const saddr = new_any_sockaddr(p.send_family, sport);
struct sockaddr * const daddr =
new_loopback_sockaddr(p.send_family, p.recv_port);
- const int fd = socket(p.send_family, p.protocol, 0);
+ const int fd = socket(p.send_family, p.protocol, 0), one = 1;
if (fd < 0)
error(1, errno, "failed to create send socket");
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)))
+ error(1, errno, "failed to set reuseaddr");
+
if (bind(fd, saddr, sockaddr_size()))
error(1, errno, "failed to bind send socket");
@@ -433,6 +437,21 @@ void enable_fastopen(void)
}
}
+static struct rlimit rlim_old, rlim_new;
+
+static __attribute__((constructor)) void main_ctor(void)
+{
+ getrlimit(RLIMIT_MEMLOCK, &rlim_old);
+ rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
+ rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
+ setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+}
+
+static __attribute__((destructor)) void main_dtor(void)
+{
+ setrlimit(RLIMIT_MEMLOCK, &rlim_old);
+}
+
int main(void)
{
fprintf(stderr, "---- IPv4 UDP ----\n");
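
Functions marked __attribute__((constructor)) run before main() and __attribute__((destructor)) at normal process exit, so the MEMLOCK bump above covers every test without touching each test body (attaching BPF programs charges locked memory against RLIMIT_MEMLOCK). A standalone sketch of the pattern, with the same 1 MiB headroom as the hunk:

#include <sys/resource.h>

static struct rlimit saved;

static __attribute__((constructor)) void bump_memlock(void)
{
        struct rlimit r;

        getrlimit(RLIMIT_MEMLOCK, &saved);
        r = saved;
        r.rlim_cur += 1UL << 20;
        r.rlim_max += 1UL << 20;
        setrlimit(RLIMIT_MEMLOCK, &r);
}

static __attribute__((destructor)) void restore_memlock(void)
{
        setrlimit(RLIMIT_MEMLOCK, &saved);
}
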
diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c
index 747c5dd47be8..5a41404aaef5 100644
--- a/tools/virtio/ringtest/ring.c
+++ b/tools/virtio/ringtest/ring.c
@@ -84,12 +84,11 @@ void alloc_ring(void)
perror("Unable to allocate ring buffer.\n");
exit(3);
}
- event = malloc(sizeof *event);
+ event = calloc(1, sizeof(*event));
if (!event) {
perror("Unable to allocate event buffer.\n");
exit(3);
}
- memset(event, 0, sizeof *event);
guest.avail_idx = 0;
guest.kicked_avail_idx = -1;
guest.last_used_idx = 0;
@@ -102,12 +101,11 @@ void alloc_ring(void)
ring[i] = desc;
}
guest.num_free = ring_size;
- data = malloc(ring_size * sizeof *data);
+ data = calloc(ring_size, sizeof(*data));
if (!data) {
perror("Unable to allocate data buffer.\n");
exit(3);
}
- memset(data, 0, ring_size * sizeof *data);
}
/* guest side */
@@ -188,16 +186,18 @@ bool enable_call()
void kick_available(void)
{
+ bool need;
+
/* Flush in previous flags write */
/* Barrier C (for pairing) */
smp_mb();
- if (!need_event(event->kick_index,
- guest.avail_idx,
- guest.kicked_avail_idx))
- return;
+ need = need_event(event->kick_index,
+ guest.avail_idx,
+ guest.kicked_avail_idx);
guest.kicked_avail_idx = guest.avail_idx;
- kick();
+ if (need)
+ kick();
}
/* host side */
@@ -253,14 +253,18 @@ bool use_buf(unsigned *lenp, void **bufp)
void call_used(void)
{
+ bool need;
+
/* Flush in previous flags write */
/* Barrier D (for pairing) */
smp_mb();
- if (!need_event(event->call_index,
+
+ need = need_event(event->call_index,
host.used_idx,
- host.called_used_idx))
- return;
+ host.called_used_idx);
host.called_used_idx = host.used_idx;
- call();
+
+ if (need)
+ call();
}
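
The hunks above snapshot the need_event() result before publishing the new kicked/called index, and notify only if the snapshot said so. The test itself is the wrap-safe event-index comparison from the virtio ring headers: with free-running 16-bit indices, "did new_idx move past event_idx since old_idx" reduces both differences modulo 2^16:

#include <stdbool.h>
#include <stdint.h>

static bool vring_need_event(uint16_t event_idx, uint16_t new_idx,
                             uint16_t old_idx)
{
        return (uint16_t)(new_idx - event_idx - 1) <
               (uint16_t)(new_idx - old_idx);
}
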
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c
index bbc3043b2fb1..5fd3fbcb9e57 100644
--- a/tools/virtio/ringtest/virtio_ring_0_9.c
+++ b/tools/virtio/ringtest/virtio_ring_0_9.c
@@ -225,16 +225,18 @@ bool enable_call()
void kick_available(void)
{
+ bool need;
+
/* Flush in previous flags write */
/* Barrier C (for pairing) */
smp_mb();
- if (!vring_need_event(vring_avail_event(&ring),
- guest.avail_idx,
- guest.kicked_avail_idx))
- return;
+ need = vring_need_event(vring_avail_event(&ring),
+ guest.avail_idx,
+ guest.kicked_avail_idx);
guest.kicked_avail_idx = guest.avail_idx;
- kick();
+ if (need)
+ kick();
}
/* host side */
@@ -316,14 +318,16 @@ bool use_buf(unsigned *lenp, void **bufp)
void call_used(void)
{
+ bool need;
+
/* Flush in previous flags write */
/* Barrier D (for pairing) */
smp_mb();
- if (!vring_need_event(vring_used_event(&ring),
- host.used_idx,
- host.called_used_idx))
- return;
+ need = vring_need_event(vring_used_event(&ring),
+ host.used_idx,
+ host.called_used_idx);
host.called_used_idx = host.used_idx;
- call();
+ if (need)
+ call();
}
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 70691c08e1ed..cca7e065a075 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -51,3 +51,6 @@ config KVM_COMPAT
config HAVE_KVM_IRQ_BYPASS
bool
+
+config HAVE_KVM_VCPU_ASYNC_IOCTL
+ bool
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index cc29a8148328..70268c0bec79 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -92,7 +92,6 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
struct arch_timer_context *vtimer;
- u32 cnt_ctl;
/*
* We may see a timer interrupt after vcpu_put() has been called which
@@ -104,15 +103,11 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
return IRQ_HANDLED;
vtimer = vcpu_vtimer(vcpu);
- if (!vtimer->irq.level) {
- cnt_ctl = read_sysreg_el0(cntv_ctl);
- cnt_ctl &= ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT |
- ARCH_TIMER_CTRL_IT_MASK;
- if (cnt_ctl == (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
- kvm_timer_update_irq(vcpu, true, vtimer);
- }
+ if (kvm_timer_should_fire(vtimer))
+ kvm_timer_update_irq(vcpu, true, vtimer);
- if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+ if (static_branch_unlikely(&userspace_irqchip_in_use) &&
+ unlikely(!irqchip_in_kernel(vcpu->kvm)))
kvm_vtimer_update_mask_user(vcpu);
return IRQ_HANDLED;
@@ -238,6 +233,16 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)
{
u64 cval, now;
+ if (timer_ctx->loaded) {
+ u32 cnt_ctl;
+
+ /* Only the virtual timer can be loaded so far */
+ cnt_ctl = read_sysreg_el0(cntv_ctl);
+ return (cnt_ctl & ARCH_TIMER_CTRL_ENABLE) &&
+ (cnt_ctl & ARCH_TIMER_CTRL_IT_STAT) &&
+ !(cnt_ctl & ARCH_TIMER_CTRL_IT_MASK);
+ }
+
if (!kvm_timer_irq_can_fire(timer_ctx))
return false;
@@ -252,15 +257,7 @@ bool kvm_timer_is_pending(struct kvm_vcpu *vcpu)
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
- if (vtimer->irq.level || ptimer->irq.level)
- return true;
-
- /*
- * When this is called from withing the wait loop of kvm_vcpu_block(),
- * the software view of the timer state is up to date (timer->loaded
- * is false), and so we can simply check if the timer should fire now.
- */
- if (!vtimer->loaded && kvm_timer_should_fire(vtimer))
+ if (kvm_timer_should_fire(vtimer))
return true;
return kvm_timer_should_fire(ptimer);
@@ -278,9 +275,9 @@ void kvm_timer_update_run(struct kvm_vcpu *vcpu)
/* Populate the device bitmap with the timer states */
regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
KVM_ARM_DEV_EL1_PTIMER);
- if (vtimer->irq.level)
+ if (kvm_timer_should_fire(vtimer))
regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
- if (ptimer->irq.level)
+ if (kvm_timer_should_fire(ptimer))
regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}
@@ -293,7 +290,8 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
timer_ctx->irq.level);
- if (likely(irqchip_in_kernel(vcpu->kvm))) {
+ if (!static_branch_unlikely(&userspace_irqchip_in_use) ||
+ likely(irqchip_in_kernel(vcpu->kvm))) {
ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
timer_ctx->irq.irq,
timer_ctx->irq.level,
@@ -331,12 +329,20 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
+ bool level;
if (unlikely(!timer->enabled))
return;
- if (kvm_timer_should_fire(vtimer) != vtimer->irq.level)
- kvm_timer_update_irq(vcpu, !vtimer->irq.level, vtimer);
+ /*
+ * The vtimer virtual interrupt is a 'mapped' interrupt, meaning part
+ * of its lifecycle is offloaded to the hardware, and we therefore may
+ * not have lowered the irq.level value before having to signal a new
+ * interrupt, but have to signal an interrupt every time the level is
+ * asserted.
+ */
+ level = kvm_timer_should_fire(vtimer);
+ kvm_timer_update_irq(vcpu, level, vtimer);
if (kvm_timer_should_fire(ptimer) != ptimer->irq.level)
kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer);
@@ -344,6 +350,12 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
phys_timer_emulate(vcpu);
}
+static void __timer_snapshot_state(struct arch_timer_context *timer)
+{
+ timer->cnt_ctl = read_sysreg_el0(cntv_ctl);
+ timer->cnt_cval = read_sysreg_el0(cntv_cval);
+}
+
static void vtimer_save_state(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -355,10 +367,8 @@ static void vtimer_save_state(struct kvm_vcpu *vcpu)
if (!vtimer->loaded)
goto out;
- if (timer->enabled) {
- vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
- vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
- }
+ if (timer->enabled)
+ __timer_snapshot_state(vtimer);
/* Disable the virtual timer */
write_sysreg_el0(0, cntv_ctl);
@@ -456,8 +466,7 @@ static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu)
bool phys_active;
int ret;
- phys_active = vtimer->irq.level ||
- kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
+ phys_active = kvm_vgic_map_is_active(vcpu, vtimer->irq.irq);
ret = irq_set_irqchip_state(host_vtimer_irq,
IRQCHIP_STATE_ACTIVE,
@@ -504,8 +513,8 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;
- return vtimer->irq.level != vlevel ||
- ptimer->irq.level != plevel;
+ return kvm_timer_should_fire(vtimer) != vlevel ||
+ kvm_timer_should_fire(ptimer) != plevel;
}
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
@@ -537,54 +546,27 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
set_cntvoff(0);
}
-static void unmask_vtimer_irq(struct kvm_vcpu *vcpu)
+/*
+ * With a userspace irqchip we have to check if the guest de-asserted the
+ * timer and if so, unmask the timer irq signal on the host interrupt
+ * controller to ensure that we see future timer signals.
+ */
+static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu)
{
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
- kvm_vtimer_update_mask_user(vcpu);
- return;
- }
-
- /*
- * If the guest disabled the timer without acking the interrupt, then
- * we must make sure the physical and virtual active states are in
- * sync by deactivating the physical interrupt, because otherwise we
- * wouldn't see the next timer interrupt in the host.
- */
- if (!kvm_vgic_map_is_active(vcpu, vtimer->irq.irq)) {
- int ret;
- ret = irq_set_irqchip_state(host_vtimer_irq,
- IRQCHIP_STATE_ACTIVE,
- false);
- WARN_ON(ret);
+ __timer_snapshot_state(vtimer);
+ if (!kvm_timer_should_fire(vtimer)) {
+ kvm_timer_update_irq(vcpu, false, vtimer);
+ kvm_vtimer_update_mask_user(vcpu);
+ }
}
}
-/**
- * kvm_timer_sync_hwstate - sync timer state from cpu
- * @vcpu: The vcpu pointer
- *
- * Check if any of the timers have expired while we were running in the guest,
- * and inject an interrupt if that was the case.
- */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
-
- /*
- * If we entered the guest with the vtimer output asserted we have to
- * check if the guest has modified the timer so that we should lower
- * the line at this point.
- */
- if (vtimer->irq.level) {
- vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
- vtimer->cnt_cval = read_sysreg_el0(cntv_cval);
- if (!kvm_timer_should_fire(vtimer)) {
- kvm_timer_update_irq(vcpu, false, vtimer);
- unmask_vtimer_irq(vcpu);
- }
- }
+ unmask_vtimer_irq_user(vcpu);
}
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -818,6 +800,19 @@ static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
return true;
}
+bool kvm_arch_timer_get_input_level(int vintid)
+{
+ struct kvm_vcpu *vcpu = kvm_arm_get_running_vcpu();
+ struct arch_timer_context *timer;
+
+ if (vintid == vcpu_vtimer(vcpu)->irq.irq)
+ timer = vcpu_vtimer(vcpu);
+ else
+ BUG(); /* We only map the vtimer so far */
+
+ return kvm_timer_should_fire(timer);
+}
+
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -839,7 +834,8 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
- ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq);
+ ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq,
+ kvm_arch_timer_get_input_level);
if (ret)
return ret;
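
userspace_irqchip_in_use (defined in arm.c below) is a static key: static_branch_unlikely() compiles to straight-line code that is patched into a branch only while the key is enabled, so VMs using the in-kernel irqchip pay nothing for the userspace-irqchip checks added here. A minimal sketch of the pattern, assuming the kernel's <linux/jump_label.h> API:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(feature_in_use);

void feature_start(void)
{
        static_branch_inc(&feature_in_use);     /* patch branches in */
}

void feature_stop(void)
{
        static_branch_dec(&feature_in_use);     /* patch branches out */
}

void hot_path(void)
{
        if (static_branch_unlikely(&feature_in_use)) {
                /* slow path, skipped via code patching when disabled */
        }
}
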
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 08464b2fba1d..86941f6181bb 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -31,6 +31,7 @@
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>
#include <kvm/arm_pmu.h>
+#include <kvm/arm_psci.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -46,7 +47,6 @@
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
-#include <asm/kvm_psci.h>
#include <asm/sections.h>
#ifdef REQUIRES_VIRT
@@ -71,17 +71,17 @@ static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
- BUG_ON(preemptible());
__this_cpu_write(kvm_arm_running_vcpu, vcpu);
}
+DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+
/**
* kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
* Must be called from non-preemptible context
*/
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
- BUG_ON(preemptible());
return __this_cpu_read(kvm_arm_running_vcpu);
}
@@ -295,6 +295,9 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
+ if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
+ static_branch_dec(&userspace_irqchip_in_use);
+
kvm_mmu_free_memory_caches(vcpu);
kvm_timer_vcpu_terminate(vcpu);
kvm_pmu_vcpu_destroy(vcpu);
@@ -381,17 +384,24 @@ static void vcpu_power_off(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
+ vcpu_load(vcpu);
+
if (vcpu->arch.power_off)
mp_state->mp_state = KVM_MP_STATE_STOPPED;
else
mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+ vcpu_put(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
+ int ret = 0;
+
+ vcpu_load(vcpu);
+
switch (mp_state->mp_state) {
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.power_off = false;
@@ -400,10 +410,11 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
vcpu_power_off(vcpu);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+ vcpu_put(vcpu);
+ return ret;
}
/**
@@ -524,14 +535,22 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
vcpu->arch.has_run_once = true;
- /*
- * Map the VGIC hardware resources before running a vcpu the first
- * time on this VM.
- */
- if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
- ret = kvm_vgic_map_resources(kvm);
- if (ret)
- return ret;
+ if (likely(irqchip_in_kernel(kvm))) {
+ /*
+ * Map the VGIC hardware resources before running a vcpu the
+ * first time on this VM.
+ */
+ if (unlikely(!vgic_ready(kvm))) {
+ ret = kvm_vgic_map_resources(kvm);
+ if (ret)
+ return ret;
+ }
+ } else {
+ /*
+ * Tell the rest of the code that there are userspace irqchip
+ * VMs in the wild.
+ */
+ static_branch_inc(&userspace_irqchip_in_use);
}
ret = kvm_timer_enable(vcpu);
@@ -619,21 +638,27 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (unlikely(!kvm_vcpu_initialized(vcpu)))
return -ENOEXEC;
+ vcpu_load(vcpu);
+
ret = kvm_vcpu_first_run_init(vcpu);
if (ret)
- return ret;
+ goto out;
if (run->exit_reason == KVM_EXIT_MMIO) {
ret = kvm_handle_mmio_return(vcpu, vcpu->run);
if (ret)
- return ret;
- if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
- return 0;
+ goto out;
+ if (kvm_arm_handle_step_debug(vcpu, vcpu->run)) {
+ ret = 0;
+ goto out;
+ }
}
- if (run->immediate_exit)
- return -EINTR;
+ if (run->immediate_exit) {
+ ret = -EINTR;
+ goto out;
+ }
kvm_sigset_activate(vcpu);
@@ -666,19 +691,30 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_vgic_flush_hwstate(vcpu);
/*
- * If we have a singal pending, or need to notify a userspace
- * irqchip about timer or PMU level changes, then we exit (and
- * update the timer level state in kvm_timer_update_run
- * below).
+ * Exit if we have a signal pending so that we can deliver the
+ * signal to user space.
*/
- if (signal_pending(current) ||
- kvm_timer_should_notify_user(vcpu) ||
- kvm_pmu_should_notify_user(vcpu)) {
+ if (signal_pending(current)) {
ret = -EINTR;
run->exit_reason = KVM_EXIT_INTR;
}
/*
+ * If we're using a userspace irqchip, then check if we need
+ * to tell a userspace irqchip about timer or PMU level
+ * changes and if so, exit to userspace (the actual level
+ * state gets updated in kvm_timer_update_run and
+ * kvm_pmu_update_run below).
+ */
+ if (static_branch_unlikely(&userspace_irqchip_in_use)) {
+ if (kvm_timer_should_notify_user(vcpu) ||
+ kvm_pmu_should_notify_user(vcpu)) {
+ ret = -EINTR;
+ run->exit_reason = KVM_EXIT_INTR;
+ }
+ }
+
+ /*
* Ensure we set mode to IN_GUEST_MODE after we disable
* interrupts and before the final VCPU requests check.
* See the comment in kvm_vcpu_exiting_guest_mode() and
@@ -690,7 +726,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_request_pending(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
kvm_pmu_sync_hwstate(vcpu);
- kvm_timer_sync_hwstate(vcpu);
+ if (static_branch_unlikely(&userspace_irqchip_in_use))
+ kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
preempt_enable();
@@ -738,7 +775,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
* we don't want vtimer interrupts to race with syncing the
* timer virtual interrupt state.
*/
- kvm_timer_sync_hwstate(vcpu);
+ if (static_branch_unlikely(&userspace_irqchip_in_use))
+ kvm_timer_sync_hwstate(vcpu);
/*
* We may have taken a host interrupt in HYP mode (ie
@@ -779,6 +817,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_sigset_deactivate(vcpu);
+out:
+ vcpu_put(vcpu);
return ret;
}
@@ -994,66 +1034,88 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
struct kvm_device_attr attr;
+ long r;
+
+ vcpu_load(vcpu);
switch (ioctl) {
case KVM_ARM_VCPU_INIT: {
struct kvm_vcpu_init init;
+ r = -EFAULT;
if (copy_from_user(&init, argp, sizeof(init)))
- return -EFAULT;
+ break;
- return kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
+ r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
+ break;
}
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
struct kvm_one_reg reg;
+ r = -ENOEXEC;
if (unlikely(!kvm_vcpu_initialized(vcpu)))
- return -ENOEXEC;
+ break;
+ r = -EFAULT;
if (copy_from_user(&reg, argp, sizeof(reg)))
- return -EFAULT;
+ break;
+
if (ioctl == KVM_SET_ONE_REG)
- return kvm_arm_set_reg(vcpu, &reg);
+ r = kvm_arm_set_reg(vcpu, &reg);
else
- return kvm_arm_get_reg(vcpu, &reg);
+ r = kvm_arm_get_reg(vcpu, &reg);
+ break;
}
case KVM_GET_REG_LIST: {
struct kvm_reg_list __user *user_list = argp;
struct kvm_reg_list reg_list;
unsigned n;
+ r = -ENOEXEC;
if (unlikely(!kvm_vcpu_initialized(vcpu)))
- return -ENOEXEC;
+ break;
+ r = -EFAULT;
if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
- return -EFAULT;
+ break;
n = reg_list.n;
reg_list.n = kvm_arm_num_regs(vcpu);
if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
- return -EFAULT;
+ break;
+ r = -E2BIG;
if (n < reg_list.n)
- return -E2BIG;
- return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
+ break;
+ r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
+ break;
}
case KVM_SET_DEVICE_ATTR: {
+ r = -EFAULT;
if (copy_from_user(&attr, argp, sizeof(attr)))
- return -EFAULT;
- return kvm_arm_vcpu_set_attr(vcpu, &attr);
+ break;
+ r = kvm_arm_vcpu_set_attr(vcpu, &attr);
+ break;
}
case KVM_GET_DEVICE_ATTR: {
+ r = -EFAULT;
if (copy_from_user(&attr, argp, sizeof(attr)))
- return -EFAULT;
- return kvm_arm_vcpu_get_attr(vcpu, &attr);
+ break;
+ r = kvm_arm_vcpu_get_attr(vcpu, &attr);
+ break;
}
case KVM_HAS_DEVICE_ATTR: {
+ r = -EFAULT;
if (copy_from_user(&attr, argp, sizeof(attr)))
- return -EFAULT;
- return kvm_arm_vcpu_has_attr(vcpu, &attr);
+ break;
+ r = kvm_arm_vcpu_has_attr(vcpu, &attr);
+ break;
}
default:
- return -EINVAL;
+ r = -EINVAL;
}
+
+ vcpu_put(vcpu);
+ return r;
}
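
The conversion trades early returns for a single exit so that vcpu_put()
always runs; the idiom is to preload r with the failure code before each step
that can fail. A condensed sketch (EXAMPLE_OP, struct example_args and
do_example() are hypothetical):

    static long example_ioctl(struct kvm_vcpu *vcpu, unsigned int ioctl,
                              void __user *argp)
    {
            long r;

            vcpu_load(vcpu);
            switch (ioctl) {
            case EXAMPLE_OP: {
                    struct example_args args;

                    r = -EFAULT;
                    if (copy_from_user(&args, argp, sizeof(args)))
                            break;          /* r already holds -EFAULT */
                    r = do_example(vcpu, &args);
                    break;
            }
            default:
                    r = -EINVAL;
            }
            vcpu_put(vcpu);                 /* single exit: put always runs */
            return r;
    }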
/**
@@ -1246,6 +1308,7 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
cpu_hyp_reset();
return NOTIFY_OK;
+ case CPU_PM_ENTER_FAILED:
case CPU_PM_EXIT:
if (__this_cpu_read(kvm_arm_hardware_enabled))
/* The hardware was enabled before suspend. */
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index d7fd46fe9efb..4fe6e797e8b3 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -21,6 +21,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
{
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index f8eaf86b740a..ec62d1cccab7 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -924,6 +924,25 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
return 0;
}
+static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+{
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ pmdp = stage2_get_pmd(kvm, NULL, addr);
+ if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
+ return false;
+
+ if (pmd_thp_or_huge(*pmdp))
+ return kvm_s2pmd_exec(pmdp);
+
+ ptep = pte_offset_kernel(pmdp, addr);
+ if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
+ return false;
+
+ return kvm_s2pte_exec(ptep);
+}
+
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
phys_addr_t addr, const pte_t *new_pte,
unsigned long flags)
@@ -1255,10 +1274,14 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}
-static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
- unsigned long size)
+static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
{
- __coherent_cache_guest_page(vcpu, pfn, size);
+ __clean_dcache_guest_page(pfn, size);
+}
+
+static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
+{
+ __invalidate_icache_guest_page(pfn, size);
}
static void kvm_send_hwpoison_signal(unsigned long address,
@@ -1284,7 +1307,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
unsigned long fault_status)
{
int ret;
- bool write_fault, writable, hugetlb = false, force_pte = false;
+ bool write_fault, exec_fault, writable, hugetlb = false, force_pte = false;
unsigned long mmu_seq;
gfn_t gfn = fault_ipa >> PAGE_SHIFT;
struct kvm *kvm = vcpu->kvm;
@@ -1296,7 +1319,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
unsigned long flags = 0;
write_fault = kvm_is_write_fault(vcpu);
- if (fault_status == FSC_PERM && !write_fault) {
+ exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
+ VM_BUG_ON(write_fault && exec_fault);
+
+ if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
kvm_err("Unexpected L2 read permission error\n");
return -EFAULT;
}
@@ -1389,7 +1415,19 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
new_pmd = kvm_s2pmd_mkwrite(new_pmd);
kvm_set_pfn_dirty(pfn);
}
- coherent_cache_guest_page(vcpu, pfn, PMD_SIZE);
+
+ if (fault_status != FSC_PERM)
+ clean_dcache_guest_page(pfn, PMD_SIZE);
+
+ if (exec_fault) {
+ new_pmd = kvm_s2pmd_mkexec(new_pmd);
+ invalidate_icache_guest_page(pfn, PMD_SIZE);
+ } else if (fault_status == FSC_PERM) {
+ /* Preserve execute if XN was already cleared */
+ if (stage2_is_exec(kvm, fault_ipa))
+ new_pmd = kvm_s2pmd_mkexec(new_pmd);
+ }
+
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
} else {
pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1399,7 +1437,19 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_pfn_dirty(pfn);
mark_page_dirty(kvm, gfn);
}
- coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
+
+ if (fault_status != FSC_PERM)
+ clean_dcache_guest_page(pfn, PAGE_SIZE);
+
+ if (exec_fault) {
+ new_pte = kvm_s2pte_mkexec(new_pte);
+ invalidate_icache_guest_page(pfn, PAGE_SIZE);
+ } else if (fault_status == FSC_PERM) {
+ /* Preserve execute if XN was already cleared */
+ if (stage2_is_exec(kvm, fault_ipa))
+ new_pte = kvm_s2pte_mkexec(new_pte);
+ }
+
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
}
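
The net policy in both the PMD and PTE paths is the same: clean the D-cache
only when the page is first mapped, invalidate the I-cache only when execution
is actually attempted, and preserve an existing executable mapping across a
write permission fault. A condensed restatement of that logic as a sketch,
reusing the helper names this patch introduces:

    static void sketch_fault_maintenance(struct kvm *kvm, phys_addr_t fault_ipa,
                                         kvm_pfn_t pfn, unsigned long size,
                                         unsigned long fault_status,
                                         bool exec_fault, pte_t *new_pte)
    {
            if (fault_status != FSC_PERM)           /* first mapping */
                    clean_dcache_guest_page(pfn, size);

            if (exec_fault) {                       /* guest fetched from it */
                    *new_pte = kvm_s2pte_mkexec(*new_pte);
                    invalidate_icache_guest_page(pfn, size);
            } else if (fault_status == FSC_PERM &&
                       stage2_is_exec(kvm, fault_ipa)) {
                    /* relaxing permissions: keep X if it was already set */
                    *new_pte = kvm_s2pte_mkexec(*new_pte);
            }
    }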
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index f1e363bab5e8..6919352cbf15 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -15,16 +15,16 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/wait.h>
#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
-#include <asm/kvm_psci.h>
#include <asm/kvm_host.h>
-#include <uapi/linux/psci.h>
+#include <kvm/arm_psci.h>
/*
* This is an implementation of the Power State Coordination Interface
@@ -33,6 +33,38 @@
#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
+static u32 smccc_get_function(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 0);
+}
+
+static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 1);
+}
+
+static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 2);
+}
+
+static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 3);
+}
+
+static void smccc_set_retval(struct kvm_vcpu *vcpu,
+ unsigned long a0,
+ unsigned long a1,
+ unsigned long a2,
+ unsigned long a3)
+{
+ vcpu_set_reg(vcpu, 0, a0);
+ vcpu_set_reg(vcpu, 1, a1);
+ vcpu_set_reg(vcpu, 2, a2);
+ vcpu_set_reg(vcpu, 3, a3);
+}
+
static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
if (affinity_level <= 3)
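
The smccc_* helpers above encode the SMCCC convention: function ID in r0/x0,
arguments in r1-r3, and up to four results returned in r0-r3. For reference, a
guest reaches this code with something like the following sketch, assuming the
SMCCC 1.1 guest helpers from the same series:

    #include <linux/arm-smccc.h>

    static u32 guest_query_smccc_version(void)
    {
            struct arm_smccc_res res;

            /* HVC with the function ID in x0; results land in res.a0..a3 */
            arm_smccc_1_1_hvc(ARM_SMCCC_VERSION_FUNC_ID, &res);
            return res.a0;
    }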
@@ -78,7 +110,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
unsigned long context_id;
phys_addr_t target_pc;
- cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+ cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
if (vcpu_mode_is_32bit(source_vcpu))
cpu_id &= ~((u32) 0);
@@ -91,14 +123,14 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
if (!vcpu)
return PSCI_RET_INVALID_PARAMS;
if (!vcpu->arch.power_off) {
- if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
+ if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
return PSCI_RET_ALREADY_ON;
else
return PSCI_RET_INVALID_PARAMS;
}
- target_pc = vcpu_get_reg(source_vcpu, 2);
- context_id = vcpu_get_reg(source_vcpu, 3);
+ target_pc = smccc_get_arg2(source_vcpu);
+ context_id = smccc_get_arg3(source_vcpu);
kvm_reset_vcpu(vcpu);
@@ -117,7 +149,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
* NOTE: We always update r0 (or x0) because for PSCI v0.1
* the general purpose registers are undefined upon CPU_ON.
*/
- vcpu_set_reg(vcpu, 0, context_id);
+ smccc_set_retval(vcpu, context_id, 0, 0, 0);
vcpu->arch.power_off = false;
smp_mb(); /* Make sure the above is visible */
@@ -137,8 +169,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;
- target_affinity = vcpu_get_reg(vcpu, 1);
- lowest_affinity_level = vcpu_get_reg(vcpu, 2);
+ target_affinity = smccc_get_arg1(vcpu);
+ lowest_affinity_level = smccc_get_arg2(vcpu);
/* Determine target affinity mask */
target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -200,18 +232,10 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}
-int kvm_psci_version(struct kvm_vcpu *vcpu)
-{
- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
- return KVM_ARM_PSCI_0_2;
-
- return KVM_ARM_PSCI_0_1;
-}
-
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+ u32 psci_fn = smccc_get_function(vcpu);
unsigned long val;
int ret = 1;
@@ -221,7 +245,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
* Bits[31:16] = Major Version = 0
* Bits[15:0] = Minor Version = 2
*/
- val = 2;
+ val = KVM_ARM_PSCI_0_2;
break;
case PSCI_0_2_FN_CPU_SUSPEND:
case PSCI_0_2_FN64_CPU_SUSPEND:
@@ -278,14 +302,56 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
break;
}
- vcpu_set_reg(vcpu, 0, val);
+ smccc_set_retval(vcpu, val, 0, 0, 0);
+ return ret;
+}
+
+static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
+{
+ u32 psci_fn = smccc_get_function(vcpu);
+ u32 feature;
+ unsigned long val;
+ int ret = 1;
+
+ switch (psci_fn) {
+ case PSCI_0_2_FN_PSCI_VERSION:
+ val = KVM_ARM_PSCI_1_0;
+ break;
+ case PSCI_1_0_FN_PSCI_FEATURES:
+ feature = smccc_get_arg1(vcpu);
+ switch (feature) {
+ case PSCI_0_2_FN_PSCI_VERSION:
+ case PSCI_0_2_FN_CPU_SUSPEND:
+ case PSCI_0_2_FN64_CPU_SUSPEND:
+ case PSCI_0_2_FN_CPU_OFF:
+ case PSCI_0_2_FN_CPU_ON:
+ case PSCI_0_2_FN64_CPU_ON:
+ case PSCI_0_2_FN_AFFINITY_INFO:
+ case PSCI_0_2_FN64_AFFINITY_INFO:
+ case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+ case PSCI_0_2_FN_SYSTEM_OFF:
+ case PSCI_0_2_FN_SYSTEM_RESET:
+ case PSCI_1_0_FN_PSCI_FEATURES:
+ case ARM_SMCCC_VERSION_FUNC_ID:
+ val = 0;
+ break;
+ default:
+ val = PSCI_RET_NOT_SUPPORTED;
+ break;
+ }
+ break;
+ default:
+ return kvm_psci_0_2_call(vcpu);
+ }
+
+ smccc_set_retval(vcpu, val, 0, 0, 0);
return ret;
}
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
+ u32 psci_fn = smccc_get_function(vcpu);
unsigned long val;
switch (psci_fn) {
@@ -303,7 +369,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
break;
}
- vcpu_set_reg(vcpu, 0, val);
+ smccc_set_retval(vcpu, val, 0, 0, 0);
return 1;
}
@@ -321,9 +387,11 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
* Errors:
* -EINVAL: Unrecognized PSCI function
*/
-int kvm_psci_call(struct kvm_vcpu *vcpu)
+static int kvm_psci_call(struct kvm_vcpu *vcpu)
{
- switch (kvm_psci_version(vcpu)) {
+ switch (kvm_psci_version(vcpu, vcpu->kvm)) {
+ case KVM_ARM_PSCI_1_0:
+ return kvm_psci_1_0_call(vcpu);
case KVM_ARM_PSCI_0_2:
return kvm_psci_0_2_call(vcpu);
case KVM_ARM_PSCI_0_1:
@@ -332,3 +400,30 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
return -EINVAL;
};
}
+
+int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
+{
+ u32 func_id = smccc_get_function(vcpu);
+ u32 val = PSCI_RET_NOT_SUPPORTED;
+ u32 feature;
+
+ switch (func_id) {
+ case ARM_SMCCC_VERSION_FUNC_ID:
+ val = ARM_SMCCC_VERSION_1_1;
+ break;
+ case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
+ feature = smccc_get_arg1(vcpu);
+ switch (feature) {
+ case ARM_SMCCC_ARCH_WORKAROUND_1:
+ if (kvm_arm_harden_branch_predictor())
+ val = 0;
+ break;
+ }
+ break;
+ default:
+ return kvm_psci_call(vcpu);
+ }
+
+ smccc_set_retval(vcpu, val, 0, 0, 0);
+ return 1;
+}
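
A guest discovers the branch-predictor workaround through the ARCH_FEATURES
query handled above. A hedged sketch of the guest-side probe, assuming SMCCC
1.1 has already been negotiated via the version call:

    #include <linux/arm-smccc.h>

    static bool guest_has_bp_hardening(void)
    {
            struct arm_smccc_res res;

            arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                              ARM_SMCCC_ARCH_WORKAROUND_1, &res);
            return (int)res.a0 >= 0;        /* 0 if implemented, negative if not */
    }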
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 8e633bd9cc1e..465095355666 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -1034,10 +1034,8 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
device = vgic_its_alloc_device(its, device_id, itt_addr,
num_eventid_bits);
- if (IS_ERR(device))
- return PTR_ERR(device);
- return 0;
+ return PTR_ERR_OR_ZERO(device);
}
/*
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index deb51ee16a3d..83d82bd7dc4e 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -16,6 +16,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
+#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>
#include "vgic.h"
@@ -122,10 +123,43 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
return value;
}
+/*
+ * Return the VCPU that performed the MMIO access, i.e. the one that trapped
+ * from within the VM, or NULL if this is a userspace access.
+ *
+ * We can disable preemption locally around accessing the per-CPU variable,
+ * and use the resolved vcpu pointer after enabling preemption again, because
+ * even if the current thread is migrated to another CPU, reading the per-CPU
+ * value later will give us the same value as we update the per-CPU variable
+ * in the preempt notifier handlers.
+ */
+static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
+{
+ struct kvm_vcpu *vcpu;
+
+ preempt_disable();
+ vcpu = kvm_arm_get_running_vcpu();
+ preempt_enable();
+ return vcpu;
+}
+
+/* Must be called with irq->irq_lock held */
+static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+ bool is_uaccess)
+{
+ if (is_uaccess)
+ return;
+
+ irq->pending_latch = true;
+ vgic_irq_set_phys_active(irq, true);
+}
+
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
+ bool is_uaccess = !vgic_get_mmio_requester_vcpu();
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
unsigned long flags;
@@ -134,17 +168,45 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
spin_lock_irqsave(&irq->irq_lock, flags);
- irq->pending_latch = true;
-
+ if (irq->hw)
+ vgic_hw_irq_spending(vcpu, irq, is_uaccess);
+ else
+ irq->pending_latch = true;
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}
+/* Must be called with irq->irq_lock held */
+static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+ bool is_uaccess)
+{
+ if (is_uaccess)
+ return;
+
+ irq->pending_latch = false;
+
+ /*
+ * We don't want the guest to effectively mask the physical
+ * interrupt by doing a write to SPENDR followed by a write to
+ * CPENDR for HW interrupts, so we clear the active state on
+ * the physical side if the virtual interrupt is not active.
+ * This may lead to taking an additional interrupt on the
+ * host, but that should not be a problem as the worst that
+ * can happen is an additional vgic injection. We also clear
+ * the pending state to maintain proper semantics for edge HW
+ * interrupts.
+ */
+ vgic_irq_set_phys_pending(irq, false);
+ if (!irq->active)
+ vgic_irq_set_phys_active(irq, false);
+}
+
void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len,
unsigned long val)
{
+ bool is_uaccess = !vgic_get_mmio_requester_vcpu();
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
unsigned long flags;
@@ -154,7 +216,10 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
spin_lock_irqsave(&irq->irq_lock, flags);
- irq->pending_latch = false;
+ if (irq->hw)
+ vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
+ else
+ irq->pending_latch = false;
spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
@@ -181,27 +246,24 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
return value;
}
+/* Must be called with irq->irq_lock held */
+static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+ bool active, bool is_uaccess)
+{
+ if (is_uaccess)
+ return;
+
+ irq->active = active;
+ vgic_irq_set_phys_active(irq, active);
+}
+
static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
- bool new_active_state)
+ bool active)
{
- struct kvm_vcpu *requester_vcpu;
unsigned long flags;
- spin_lock_irqsave(&irq->irq_lock, flags);
+ struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();
- /*
- * The vcpu parameter here can mean multiple things depending on how
- * this function is called; when handling a trap from the kernel it
- * depends on the GIC version, and these functions are also called as
- * part of save/restore from userspace.
- *
- * Therefore, we have to figure out the requester in a reliable way.
- *
- * When accessing VGIC state from user space, the requester_vcpu is
- * NULL, which is fine, because we guarantee that no VCPUs are running
- * when accessing VGIC state from user space so irq->vcpu->cpu is
- * always -1.
- */
- requester_vcpu = kvm_arm_get_running_vcpu();
+ spin_lock_irqsave(&irq->irq_lock, flags);
/*
* If this virtual IRQ was written into a list register, we
@@ -213,14 +275,23 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
* vgic_change_active_prepare) and still has to sync back this IRQ,
* so we release and re-acquire the spin_lock to let the other thread
* sync back the IRQ.
+ *
+ * When accessing VGIC state from user space, requester_vcpu is
+ * NULL, which is fine, because we guarantee that no VCPUs are running
+ * when accessing VGIC state from user space so irq->vcpu->cpu is
+ * always -1.
*/
while (irq->vcpu && /* IRQ may have state in an LR somewhere */
irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
irq->vcpu->cpu != -1) /* VCPU thread is running */
cond_resched_lock(&irq->irq_lock);
- irq->active = new_active_state;
- if (new_active_state)
+ if (irq->hw)
+ vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
+ else
+ irq->active = active;
+
+ if (irq->active)
vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
else
spin_unlock_irqrestore(&irq->irq_lock, flags);
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 80897102da26..c32d7b93ffd1 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -105,6 +105,26 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
irq->pending_latch = false;
}
+ /*
+ * Level-triggered mapped IRQs are special because we only
+ * observe rising edges as input to the VGIC.
+ *
+ * If the guest never acked the interrupt we have to sample
+ * the physical line and set the line level, because the
+ * device state could have changed or we simply need to
+ * process the still pending interrupt later.
+ *
+ * If this causes us to lower the level, we have to also clear
+ * the physical active state, since we will otherwise never be
+ * told when the interrupt becomes asserted again.
+ */
+ if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT)) {
+ irq->line_level = vgic_get_phys_line_level(irq);
+
+ if (!irq->line_level)
+ vgic_irq_set_phys_active(irq, false);
+ }
+
spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -162,6 +182,15 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
val |= GICH_LR_EOI;
}
+ /*
+ * Level-triggered mapped IRQs are special because we only observe
+ * rising edges as input to the VGIC. We therefore lower the line
+ * level here, so that we can take new virtual IRQs. See
+ * vgic_v2_fold_lr_state for more info.
+ */
+ if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT))
+ irq->line_level = false;
+
/* The GICv2 LR only holds five bits of priority. */
val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;
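
Taken together, the fold hunk above and this populate hunk implement a
resampling loop for mapped level interrupts: injecting a pending LR forces
line_level low, and folding a still-pending LR re-reads the physical line. A
condensed sketch of that lifecycle, with names as in the vgic code:

    static void sketch_inject(struct vgic_irq *irq, u32 *lr_val)
    {
            if (vgic_irq_is_mapped_level(irq) && (*lr_val & GICH_LR_PENDING_BIT))
                    irq->line_level = false;        /* allow a new rising edge */
    }

    static void sketch_fold(struct vgic_irq *irq, u32 lr_val)
    {
            if (vgic_irq_is_mapped_level(irq) && (lr_val & GICH_LR_PENDING_BIT)) {
                    irq->line_level = vgic_get_phys_line_level(irq);
                    if (!irq->line_level)           /* line dropped: re-arm */
                            vgic_irq_set_phys_active(irq, false);
            }
    }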
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index f47e8481fa45..6b329414e57a 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -96,6 +96,26 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
irq->pending_latch = false;
}
+ /*
+ * Level-triggered mapped IRQs are special because we only
+ * observe rising edges as input to the VGIC.
+ *
+ * If the guest never acked the interrupt we have to sample
+ * the physical line and set the line level, because the
+ * device state could have changed or we simply need to
+ * process the still pending interrupt later.
+ *
+ * If this causes us to lower the level, we have to also clear
+ * the physical active state, since we will otherwise never be
+ * told when the interrupt becomes asserted again.
+ */
+ if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
+ irq->line_level = vgic_get_phys_line_level(irq);
+
+ if (!irq->line_level)
+ vgic_irq_set_phys_active(irq, false);
+ }
+
spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -146,6 +166,15 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
}
/*
+ * Level-triggered mapped IRQs are special because we only observe
+ * rising edges as input to the VGIC. We therefore lower the line
+ * level here, so that we can take new virtual IRQs. See
+ * vgic_v3_fold_lr_state for more info.
+ */
+ if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
+ irq->line_level = false;
+
+ /*
* We currently only support Group1 interrupts, which is a
* known defect. This needs to be addressed at some point.
*/
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index ecb8e25f5fe5..c7c5ef190afa 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -144,6 +144,38 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
kfree(irq);
}
+void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
+{
+ WARN_ON(irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ pending));
+}
+
+bool vgic_get_phys_line_level(struct vgic_irq *irq)
+{
+ bool line_level;
+
+ BUG_ON(!irq->hw);
+
+ if (irq->get_input_level)
+ return irq->get_input_level(irq->intid);
+
+ WARN_ON(irq_get_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ &line_level));
+ return line_level;
+}
+
+/* Set/Clear the physical active state */
+void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
+{
+ BUG_ON(!irq->hw);
+ WARN_ON(irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_ACTIVE,
+ active));
+}
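
These wrappers drive the physical distributor through the generic
irqchip-state API; the BUG_ON/WARN_ON split encodes that calling them on a
non-mapped IRQ is a programming error, while a failing state access is merely
unexpected. For reference, a minimal direct use of that API:

    #include <linux/interrupt.h>

    /* Query, then clear, the pending state of a host interrupt. */
    static bool demo_clear_pending(unsigned int host_irq)
    {
            bool pending = false;

            if (irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, &pending))
                    return false;   /* irqchip does not expose the state */
            if (pending)
                    irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, false);
            return pending;
    }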
+
/**
* kvm_vgic_target_oracle - compute the target vcpu for an irq
*
@@ -413,7 +445,8 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
- unsigned int host_irq)
+ unsigned int host_irq,
+ bool (*get_input_level)(int vintid))
{
struct irq_desc *desc;
struct irq_data *data;
@@ -433,6 +466,7 @@ static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
irq->hw = true;
irq->host_irq = host_irq;
irq->hwintid = data->hwirq;
+ irq->get_input_level = get_input_level;
return 0;
}
@@ -441,10 +475,11 @@ static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
irq->hw = false;
irq->hwintid = 0;
+ irq->get_input_level = NULL;
}
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
- u32 vintid)
+ u32 vintid, bool (*get_input_level)(int vintid))
{
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
unsigned long flags;
@@ -453,7 +488,7 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
BUG_ON(!irq);
spin_lock_irqsave(&irq->irq_lock, flags);
- ret = kvm_vgic_map_irq(vcpu, irq, host_irq);
+ ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index efbcf8f96f9c..12c37b89f7a3 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -104,6 +104,11 @@ static inline bool irq_is_pending(struct vgic_irq *irq)
return irq->pending_latch || irq->line_level;
}
+static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
+{
+ return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
+}
+
/*
* This struct provides an intermediate representation of the fields contained
* in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
@@ -140,6 +145,9 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
u32 intid);
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
+bool vgic_get_phys_line_level(struct vgic_irq *irq);
+void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
+void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
unsigned long flags);
void vgic_kick_vcpus(struct kvm *kvm);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 001085b611ad..4501e658e8d6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -151,17 +151,12 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
/*
* Switches to specified vcpu, until a matching vcpu_put()
*/
-int vcpu_load(struct kvm_vcpu *vcpu)
+void vcpu_load(struct kvm_vcpu *vcpu)
{
- int cpu;
-
- if (mutex_lock_killable(&vcpu->mutex))
- return -EINTR;
- cpu = get_cpu();
+ int cpu = get_cpu();
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
put_cpu();
- return 0;
}
EXPORT_SYMBOL_GPL(vcpu_load);
@@ -171,7 +166,6 @@ void vcpu_put(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_put(vcpu);
preempt_notifier_unregister(&vcpu->preempt_notifier);
preempt_enable();
- mutex_unlock(&vcpu->mutex);
}
EXPORT_SYMBOL_GPL(vcpu_put);
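
With the mutex moved out, vcpu_load()/vcpu_put() are pure state management and
the serialization lives in the ioctl path. A sketch of the resulting nesting
(handle_one_ioctl() is hypothetical):

    static long demo_vcpu_ioctl(struct kvm_vcpu *vcpu)
    {
            long r;

            if (mutex_lock_killable(&vcpu->mutex))  /* serialization only */
                    return -EINTR;
            vcpu_load(vcpu);                        /* arch state; cannot fail */
            r = handle_one_ioctl(vcpu);
            vcpu_put(vcpu);
            mutex_unlock(&vcpu->mutex);
            return r;
    }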
@@ -1416,7 +1410,8 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
static int hva_to_pfn_remapped(struct vm_area_struct *vma,
unsigned long addr, bool *async,
- bool write_fault, kvm_pfn_t *p_pfn)
+ bool write_fault, bool *writable,
+ kvm_pfn_t *p_pfn)
{
unsigned long pfn;
int r;
@@ -1442,6 +1437,8 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
}
+ if (writable)
+ *writable = true;
/*
* Get a reference here because callers of *hva_to_pfn* and
@@ -1507,7 +1504,7 @@ retry:
if (vma == NULL)
pfn = KVM_PFN_ERR_FAULT;
else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
- r = hva_to_pfn_remapped(vma, addr, async, write_fault, &pfn);
+ r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
if (r == -EAGAIN)
goto retry;
if (r < 0)
@@ -2400,7 +2397,10 @@ static struct file_operations kvm_vcpu_fops = {
*/
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
- return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
+ char name[8 + 1 + ITOA_MAX_LEN + 1];
+
+ snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
+ return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}
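
The name buffer is sized piecewise: 8 bytes for "kvm-vcpu", 1 for the colon,
ITOA_MAX_LEN for the widest decimal an id can print (defined as 12 in
kvm_main.c), and 1 for the terminating NUL. A quick userspace check of the
arithmetic:

    #include <limits.h>
    #include <stdio.h>

    #define ITOA_MAX_LEN 12                 /* as in kvm_main.c */

    int main(void)
    {
            char name[8 + 1 + ITOA_MAX_LEN + 1];    /* "kvm-vcpu" ':' digits NUL */

            snprintf(name, sizeof(name), "kvm-vcpu:%d", INT_MAX);
            printf("%s fits in %zu bytes\n", name, sizeof(name));
            return 0;
    }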
static int kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
@@ -2532,19 +2532,16 @@ static long kvm_vcpu_ioctl(struct file *filp,
if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
return -EINVAL;
-#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
/*
- * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
- * so vcpu_load() would break it.
+ * Some architectures have vcpu ioctls that are asynchronous to vcpu
+ * execution; mutex_lock() would break them.
*/
- if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_S390_IRQ || ioctl == KVM_INTERRUPT)
- return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
-#endif
-
-
- r = vcpu_load(vcpu);
- if (r)
+ r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
+ if (r != -ENOIOCTLCMD)
return r;
+
+ if (mutex_lock_killable(&vcpu->mutex))
+ return -EINTR;
switch (ioctl) {
case KVM_RUN: {
struct pid *oldpid;
@@ -2716,7 +2713,7 @@ out_free1:
r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
}
out:
- vcpu_put(vcpu);
+ mutex_unlock(&vcpu->mutex);
kfree(fpu);
kfree(kvm_sregs);
return r;
@@ -3168,21 +3165,18 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
return PTR_ERR(kvm);
#ifdef CONFIG_KVM_MMIO
r = kvm_coalesced_mmio_init(kvm);
- if (r < 0) {
- kvm_put_kvm(kvm);
- return r;
- }
+ if (r < 0)
+ goto put_kvm;
#endif
r = get_unused_fd_flags(O_CLOEXEC);
- if (r < 0) {
- kvm_put_kvm(kvm);
- return r;
- }
+ if (r < 0)
+ goto put_kvm;
+
file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
if (IS_ERR(file)) {
put_unused_fd(r);
- kvm_put_kvm(kvm);
- return PTR_ERR(file);
+ r = PTR_ERR(file);
+ goto put_kvm;
}
/*
@@ -3200,6 +3194,10 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
fd_install(r, file);
return r;
+
+put_kvm:
+ kvm_put_kvm(kvm);
+ return r;
}
static long kvm_dev_ioctl(struct file *filp,