-rw-r--r--  MAINTAINERS | 28
-rw-r--r--  arch/arm64/include/asm/exception.h | 4
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 6
-rw-r--r--  arch/arm64/kernel/kaslr.c | 1
-rw-r--r--  arch/arm64/kernel/process.c | 13
-rw-r--r--  arch/arm64/kernel/time.c | 2
-rw-r--r--  crypto/Kconfig | 4
-rw-r--r--  drivers/dax/super.c | 2
-rw-r--r--  drivers/gpio/gpio-bd71828.c | 10
-rw-r--r--  drivers/gpio/gpio-sifive.c | 6
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 5
-rw-r--r--  drivers/gpio/gpiolib.c | 30
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h | 46
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 22
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 3
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 44
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 50
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 37
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 129
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 46
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 354
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 258
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 35
-rw-r--r--  drivers/gpu/drm/i915/i915_active.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 60
-rw-r--r--  drivers/gpu/drm/i915/i915_scheduler.c | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 14
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 1
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.h | 6
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c | 3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c | 7
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 1
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 9
-rw-r--r--  drivers/perf/arm_smmuv3_pmu.c | 2
-rw-r--r--  drivers/spmi/spmi-pmic-arb.c | 4
-rw-r--r--  fs/dax.c | 11
-rw-r--r--  fs/ext2/inode.c | 5
-rw-r--r--  fs/ext4/inode.c | 2
-rw-r--r--  fs/xfs/xfs_aops.c | 2
-rw-r--r--  include/linux/dax.h | 14
-rw-r--r--  include/linux/pipe_fs_i.h | 3
-rw-r--r--  include/linux/trace_events.h | 2
-rw-r--r--  init/Kconfig | 1
-rw-r--r--  init/main.c | 37
-rw-r--r--  kernel/cgroup/cgroup.c | 13
-rw-r--r--  kernel/trace/trace_events_hist.c | 227
-rw-r--r--  kernel/trace/trace_kprobe.c | 2
-rw-r--r--  lib/Kconfig | 3
-rw-r--r--  lib/Makefile | 2
-rw-r--r--  lib/bootconfig.c | 15
-rw-r--r--  scripts/kallsyms.c | 4
-rwxr-xr-x  scripts/link-vmlinux.sh | 2
-rw-r--r--  security/selinux/hooks.c | 2
-rw-r--r--  security/selinux/ss/sidtab.c | 12
-rw-r--r--  sound/core/pcm_native.c | 3
-rw-r--r--  sound/pci/hda/patch_realtek.c | 4
-rw-r--r--  sound/usb/clock.c | 91
-rw-r--r--  sound/usb/clock.h | 4
-rw-r--r--  sound/usb/format.c | 36
-rw-r--r--  sound/usb/mixer.c | 12
-rw-r--r--  sound/usb/quirks.c | 1
-rw-r--r--  tools/bootconfig/include/linux/memblock.h | 12
-rw-r--r--  tools/bootconfig/include/linux/printk.h | 2
-rw-r--r--  tools/bootconfig/main.c | 28
-rwxr-xr-x  tools/bootconfig/test-bootconfig.sh | 9
95 files changed, 1498 insertions(+), 562 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 38fe2f3f7b6f..a0d86490c2c6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2796,11 +2796,11 @@ F: drivers/block/aoe/
ATHEROS 71XX/9XXX GPIO DRIVER
M: Alban Bedel <albeu@free.fr>
+S: Maintained
W: https://github.com/AlbanBedel/linux
T: git git://github.com/AlbanBedel/linux
-S: Maintained
-F: drivers/gpio/gpio-ath79.c
F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt
+F: drivers/gpio/gpio-ath79.c
ATHEROS 71XX/9XXX USB PHY DRIVER
M: Alban Bedel <albeu@free.fr>
@@ -3422,8 +3422,8 @@ BROADCOM BRCMSTB GPIO DRIVER
M: Gregory Fong <gregory.0xf0@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
S: Supported
-F: drivers/gpio/gpio-brcmstb.c
F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+F: drivers/gpio/gpio-brcmstb.c
BROADCOM BRCMSTB I2C DRIVER
M: Kamal Dasu <kdasu.kdev@gmail.com>
@@ -3481,8 +3481,8 @@ BROADCOM KONA GPIO DRIVER
M: Ray Jui <rjui@broadcom.com>
L: bcm-kernel-feedback-list@broadcom.com
S: Supported
-F: drivers/gpio/gpio-bcm-kona.c
F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
+F: drivers/gpio/gpio-bcm-kona.c
BROADCOM NETXTREME-E ROCE DRIVER
M: Selvin Xavier <selvin.xavier@broadcom.com>
@@ -3597,8 +3597,8 @@ F: sound/pci/bt87x.c
BT8XXGPIO DRIVER
M: Michael Buesch <m@bues.ch>
-W: http://bu3sch.de/btgpio.php
S: Maintained
+W: http://bu3sch.de/btgpio.php
F: drivers/gpio/gpio-bt8xx.c
BTRFS FILE SYSTEM
@@ -7143,18 +7143,18 @@ GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
L: linux-gpio@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+F: Documentation/ABI/obsolete/sysfs-gpio
+F: Documentation/ABI/testing/gpio-cdev
+F: Documentation/admin-guide/gpio/
F: Documentation/devicetree/bindings/gpio/
F: Documentation/driver-api/gpio/
-F: Documentation/admin-guide/gpio/
-F: Documentation/ABI/testing/gpio-cdev
-F: Documentation/ABI/obsolete/sysfs-gpio
F: drivers/gpio/
+F: include/asm-generic/gpio.h
F: include/linux/gpio/
F: include/linux/gpio.h
F: include/linux/of_gpio.h
-F: include/asm-generic/gpio.h
F: include/uapi/linux/gpio.h
F: tools/gpio/
@@ -8055,8 +8055,8 @@ F: drivers/scsi/ips.*
ICH LPC AND GPIO DRIVER
M: Peter Tyser <ptyser@xes-inc.com>
S: Maintained
-F: drivers/mfd/lpc_ich.c
F: drivers/gpio/gpio-ich.c
+F: drivers/mfd/lpc_ich.c
ICY I2C DRIVER
M: Max Staudt <max@enpas.org>
@@ -16075,8 +16075,8 @@ F: Documentation/devicetree/bindings/reset/snps,axs10x-reset.txt
SYNOPSYS CREG GPIO DRIVER
M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
S: Maintained
-F: drivers/gpio/gpio-creg-snps.c
F: Documentation/devicetree/bindings/gpio/snps,creg-gpio.txt
+F: drivers/gpio/gpio-creg-snps.c
SYNOPSYS DESIGNWARE 8250 UART DRIVER
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -16087,8 +16087,8 @@ SYNOPSYS DESIGNWARE APB GPIO DRIVER
M: Hoan Tran <hoan@os.amperecomputing.com>
L: linux-gpio@vger.kernel.org
S: Maintained
-F: drivers/gpio/gpio-dwapb.c
F: Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
+F: drivers/gpio/gpio-dwapb.c
SYNOPSYS DESIGNWARE AXI DMAC DRIVER
M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
@@ -18414,8 +18414,8 @@ M: Nandor Han <nandor.han@ge.com>
M: Semi Malinen <semi.malinen@ge.com>
L: linux-gpio@vger.kernel.org
S: Maintained
-F: drivers/gpio/gpio-xra1403.c
F: Documentation/devicetree/bindings/gpio/gpio-xra1403.txt
+F: drivers/gpio/gpio-xra1403.c
XTENSA XTFPGA PLATFORM SUPPORT
M: Max Filippov <jcmvbkbc@gmail.com>
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index b87c6e276ab1..7a6e81ca23a8 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -33,7 +33,6 @@ static inline u32 disr_to_esr(u64 disr)
asmlinkage void enter_from_user_mode(void);
void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
-void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
@@ -47,7 +46,4 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
void do_cp15instr(unsigned int esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
-void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr,
- struct pt_regs *regs);
-
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 102404dc1e13..9083d6992603 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -18,6 +18,10 @@
* See:
* https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
*/
-#define vcpu_is_preempted(cpu) false
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+ return false;
+}
#endif /* __ASM_SPINLOCK_H */
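Aside: the hunk above replaces a bare `false` macro with a self-named define plus a static inline. A minimal userspace sketch of that idiom follows (not kernel code; main() and the printouts are illustrative): the inline type-checks its argument, and the self-referential #define is the usual kernel marker telling generic headers that an arch override exists.

#include <stdbool.h>
#include <stdio.h>

#define vcpu_is_preempted vcpu_is_preempted	/* "override present" marker */
static inline bool vcpu_is_preempted(int cpu)
{
	(void)cpu;		/* argument is still type-checked, unlike a bare macro */
	return false;
}

int main(void)
{
	printf("cpu0 preempted: %d\n", vcpu_is_preempted(0));
#ifdef vcpu_is_preempted
	puts("generic fallback would be compiled out");
#endif
	return 0;
}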
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 53b8a4ee64ff..91a83104c6e8 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/types.h>
+#include <asm/archrandom.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index bbb0f0c145f6..00626057a384 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -466,6 +466,13 @@ static void ssbs_thread_switch(struct task_struct *next)
if (unlikely(next->flags & PF_KTHREAD))
return;
+ /*
+ * If all CPUs implement the SSBS extension, then we just need to
+ * context-switch the PSTATE field.
+ */
+ if (cpu_have_feature(cpu_feature(SSBS)))
+ return;
+
/* If the mitigation is enabled, then we leave SSBS clear. */
if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
test_tsk_thread_flag(next, TIF_SSBD))
@@ -608,8 +615,6 @@ long get_tagged_addr_ctrl(void)
* only prevents the tagged address ABI enabling via prctl() and does not
* disable it for tasks that already opted in to the relaxed ABI.
*/
-static int zero;
-static int one = 1;
static struct ctl_table tagged_addr_sysctl_table[] = {
{
@@ -618,8 +623,8 @@ static struct ctl_table tagged_addr_sysctl_table[] = {
.data = &tagged_addr_disabled,
.maxlen = sizeof(int),
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{ }
};
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 73f06d4b3aae..eebbc8d7123e 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -23,7 +23,7 @@
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
#include <linux/acpi.h>
#include <clocksource/arm_arch_timer.h>
diff --git a/crypto/Kconfig b/crypto/Kconfig
index cdb51d4272d0..c24a47406f8f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -136,8 +136,6 @@ config CRYPTO_USER
Userspace configuration for cryptographic instantiations such as
cbc(aes).
-if CRYPTO_MANAGER2
-
config CRYPTO_MANAGER_DISABLE_TESTS
bool "Disable run-time self tests"
default y
@@ -155,8 +153,6 @@ config CRYPTO_MANAGER_EXTRA_TESTS
This is intended for developer use only, as these tests take much
longer to run than the normal self tests.
-endif # if CRYPTO_MANAGER2
-
config CRYPTO_GF128MUL
tristate
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 26a654dbc69a..0aa4b6bc5101 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -61,7 +61,7 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
if (!blk_queue_dax(bdev->bd_queue))
return NULL;
- return fs_dax_get_by_host(bdev->bd_disk->disk_name);
+ return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
index 04aade9e0a4d..3dbbc638e9a9 100644
--- a/drivers/gpio/gpio-bd71828.c
+++ b/drivers/gpio/gpio-bd71828.c
@@ -10,16 +10,6 @@
#define GPIO_OUT_REG(off) (BD71828_REG_GPIO_CTRL1 + (off))
#define HALL_GPIO_OFFSET 3
-/*
- * These defines can be removed when
- * "gpio: Add definition for GPIO direction"
- * (9208b1e77d6e8e9776f34f46ef4079ecac9c3c25 in GPIO tree) gets merged,
- */
-#ifndef GPIO_LINE_DIRECTION_IN
- #define GPIO_LINE_DIRECTION_IN 1
- #define GPIO_LINE_DIRECTION_OUT 0
-#endif
-
struct bd71828_gpio {
struct rohm_regmap_dev chip;
struct gpio_chip gpio;
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 147a1bd04515..c54dd08f2cbf 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -35,7 +35,7 @@ struct sifive_gpio {
void __iomem *base;
struct gpio_chip gc;
struct regmap *regs;
- u32 irq_state;
+ unsigned long irq_state;
unsigned int trigger[SIFIVE_GPIO_MAX];
unsigned int irq_parent[SIFIVE_GPIO_MAX];
};
@@ -94,7 +94,7 @@ static void sifive_gpio_irq_enable(struct irq_data *d)
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
/* Enable interrupts */
- assign_bit(offset, (unsigned long *)&chip->irq_state, 1);
+ assign_bit(offset, &chip->irq_state, 1);
sifive_gpio_set_ie(chip, offset);
}
@@ -104,7 +104,7 @@ static void sifive_gpio_irq_disable(struct irq_data *d)
struct sifive_gpio *chip = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
- assign_bit(offset, (unsigned long *)&chip->irq_state, 0);
+ assign_bit(offset, &chip->irq_state, 0);
sifive_gpio_set_ie(chip, offset);
irq_chip_disable_parent(d);
}
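Aside: the u32 -> unsigned long change above matters because Linux bitops such as assign_bit() address memory in unsigned long words; casting a 4-byte field to (unsigned long *) makes the helper touch 8 bytes on 64-bit targets, spilling into whatever follows. A standalone sketch of the size mismatch (struct demo is hypothetical):

#include <stdint.h>
#include <stdio.h>

struct demo {
	uint32_t irq_state;	/* old type: too small for long-based bitops */
	uint32_t neighbour;	/* would be corrupted by an 8-byte access */
};

int main(void)
{
	/* On a typical 64-bit target this prints 4 vs 8. */
	printf("sizeof(u32)=%zu sizeof(unsigned long)=%zu\n",
	       sizeof(uint32_t), sizeof(unsigned long));
	printf("a long-sized store at &irq_state covers %zu bytes of a %zu-byte field\n",
	       sizeof(unsigned long), sizeof(((struct demo *)0)->irq_state));
	return 0;
}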
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index a9748b5198e6..67f9f82e0db0 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -147,9 +147,10 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
for (i = 0; i < gc->ngpio; i++) {
if (*mask == 0)
break;
+ /* Once finished with an index write it out to the register */
if (index != xgpio_index(chip, i)) {
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, i),
+ index * XGPIO_CHANNEL_OFFSET,
chip->gpio_state[index]);
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
index = xgpio_index(chip, i);
@@ -165,7 +166,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
}
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, i), chip->gpio_state[index]);
+ index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 753283486037..4d0106ceeba7 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -3035,13 +3035,33 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
* rely on gpio_request() having been called beforehand.
*/
-static int gpio_set_config(struct gpio_chip *gc, unsigned int offset,
- enum pin_config_param mode)
+static int gpio_do_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
{
if (!gc->set_config)
return -ENOTSUPP;
- return gc->set_config(gc, offset, mode);
+ return gc->set_config(gc, offset, config);
+}
+
+static int gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ enum pin_config_param mode)
+{
+ unsigned long config;
+ unsigned arg;
+
+ switch (mode) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = 1;
+ break;
+
+ default:
+ arg = 0;
+ }
+
+ config = PIN_CONF_PACKED(mode, arg);
+ return gpio_do_set_config(gc, offset, config);
}
static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
@@ -3277,7 +3297,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
chip = desc->gdev->chip;
config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
- return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
+ return gpio_do_set_config(chip, gpio_chip_hwgpio(desc), config);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
@@ -3311,7 +3331,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
!transitory);
gpio = gpio_chip_hwgpio(desc);
- rc = gpio_set_config(chip, gpio, packed);
+ rc = gpio_do_set_config(chip, gpio, packed);
if (rc == -ENOTSUPP) {
dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
gpio);
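Aside: the new gpio_set_config() above packs the mode and its argument into one unsigned long. A standalone sketch of that packing (mirroring PIN_CONF_PACKED from <linux/pinctrl/pinconf-generic.h>: parameter in the low 8 bits, argument above; the enum value below is illustrative, not the kernel's):

#include <stdio.h>

#define PIN_CONF_PACKED(p, a) (((unsigned long)(a) << 8) | ((unsigned long)(p) & 0xffUL))

enum pin_config_param { PIN_CONFIG_BIAS_PULL_UP = 5 };	/* illustrative value */

int main(void)
{
	unsigned long config = PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 1);

	/* unpack: low byte is the parameter, the rest is the argument */
	printf("param=%lu arg=%lu\n", config & 0xffUL, config >> 8);	/* param=5 arg=1 */
	return 0;
}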
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 07914e34bc25..1311d6aec5d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -52,7 +52,7 @@ static int amdgpu_perf_event_init(struct perf_event *event)
return -ENOENT;
/* update the hw_perf_event struct with config data */
- hwc->conf = event->attr.config;
+ hwc->config = event->attr.config;
return 0;
}
@@ -74,9 +74,9 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
if (!(flags & PERF_EF_RELOAD))
- pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 1);
- pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 0);
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 0);
break;
default:
break;
@@ -101,7 +101,7 @@ static void amdgpu_perf_read(struct perf_event *event)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->conf,
+ pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->config,
&count);
break;
default:
@@ -126,7 +126,7 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 0);
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 0);
break;
default:
break;
@@ -156,7 +156,8 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- retval = pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
+ retval = pe->adev->df.funcs->pmc_start(pe->adev,
+ hwc->config, 1);
break;
default:
return 0;
@@ -184,7 +185,7 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 1);
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 1);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index d6deb0eb1e15..6fe057329de2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -179,6 +179,7 @@ struct amdgpu_vcn_inst {
struct amdgpu_irq_src irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
+ struct dpg_pause_state pause_state;
void *dpg_sram_cpu_addr;
uint64_t dpg_sram_gpu_addr;
uint32_t *dpg_sram_curr_addr;
@@ -190,8 +191,6 @@ struct amdgpu_vcn {
const struct firmware *fw; /* VCN firmware */
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
- struct dpg_pause_state pause_state;
-
bool indirect_sram;
uint8_t num_vcn_inst;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 90f64b8bc358..b33a4eb39193 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4374,9 +4374,17 @@ static int gfx_v9_0_ecc_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = gfx_v9_0_do_edc_gds_workarounds(adev);
- if (r)
- return r;
+ /*
+ * Temp workaround to fix the issue that CP firmware fails to
+ * update read pointer when CPDMA is writing clearing operation
+ * to GDS in suspend/resume sequence on several cards. So just
+ * limit this operation in cold boot sequence.
+ */
+ if (!adev->in_suspend) {
+ r = gfx_v9_0_do_edc_gds_workarounds(adev);
+ if (r)
+ return r;
+ }
/* requires IBs so do in late init after IB pool is initialized */
r = gfx_v9_0_do_edc_gpr_workarounds(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 1a24fadd30e2..71f61afdc655 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1207,9 +1207,10 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
- adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ adev->vcn.inst[inst_idx].pause_state.fw_based,
+ adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
@@ -1258,13 +1259,14 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
+ if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
- adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ adev->vcn.inst[inst_idx].pause_state.fw_based,
+ adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
@@ -1318,7 +1320,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.jpeg = new_state->jpeg;
+ adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 4f7216788f11..c387c81f8695 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -1137,9 +1137,9 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int ret_code;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
- adev->vcn.pause_state.fw_based, new_state->fw_based);
+ adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
@@ -1185,7 +1185,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 70fae7977f8f..2d64ba1adf99 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1367,9 +1367,9 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
int ret_code;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
- adev->vcn.pause_state.fw_based, new_state->fw_based);
+ adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
@@ -1407,14 +1407,14 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
- 0x0, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
} else {
/* unpause dpg, no need to wait */
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 279541517a99..63e8a12a74bc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8408,7 +8408,6 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
/* Calculate number of static frames before generating interrupt to
* enter PSR.
*/
- unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
// Init fail safe of 2 frames static
unsigned int num_frames_static = 2;
@@ -8423,8 +8422,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
* Calculate number of frames such that at least 30 ms of time has
* passed.
*/
- if (vsync_rate_hz != 0)
+ if (vsync_rate_hz != 0) {
+ unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
num_frames_static = (30000 / frame_time_microsec) + 1;
+ }
params.triggers.cursor_update = true;
params.triggers.overlay_update = true;
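Aside: the hunk above scopes the division inside the vsync check so a zero rate can never divide. A worked example of the frame-count math, wrapped in a hypothetical helper for illustration:

#include <stdio.h>

static unsigned int psr_static_frames(unsigned int vsync_rate_hz)
{
	unsigned int num_frames_static = 2;	/* fail-safe default */

	if (vsync_rate_hz != 0) {
		unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

		/* enough frames that at least ~30 ms pass before PSR entry */
		num_frames_static = 30000 / frame_time_microsec + 1;
	}
	return num_frames_static;
}

int main(void)
{
	printf("60 Hz  -> %u frames\n", psr_static_frames(60));	/* 30000/16666 + 1 = 2 */
	printf("144 Hz -> %u frames\n", psr_static_frames(144));	/* 30000/6944 + 1 = 5 */
	printf("0 Hz   -> %u frames (fail-safe)\n", psr_static_frames(0));
	return 0;
}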
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 629a07a2719b..c4ba6e84db65 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -711,10 +711,6 @@ static void enable_disp_power_gating_dmcub(
power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
power_gating.power_gating.pwr = *pwr;
- /* ATOM_ENABLE is old API in DMUB */
- if (power_gating.power_gating.pwr.enable == ATOM_ENABLE)
- power_gating.power_gating.pwr.enable = ATOM_INIT;
-
dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 3cd283195091..c0f6a8c7de7d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -87,6 +87,12 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
###############################################################################
CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
+# prevent build errors regarding soft-float vs hard-float FP ABI tags
+# this code is currently unused on ppc64, as it applies to Renoir APUs only
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
+endif
+
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 495f01e9f2ca..49ce46b543ea 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -117,7 +117,7 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
- if (safe_to_lower || prev_dppclk_khz < dppclk_khz) {
+ if ((prev_dppclk_khz > dppclk_khz && safe_to_lower) || prev_dppclk_khz < dppclk_khz) {
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
}
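Aside: the tightened condition above makes the DTO update fire only when the clock actually rises, or actually falls while lowering is safe; an unchanged clock no longer triggers a rewrite. A truth-table sketch (should_update_dpp_dto is a hypothetical wrapper):

#include <stdbool.h>
#include <stdio.h>

static bool should_update_dpp_dto(int prev_khz, int new_khz, bool safe_to_lower)
{
	return (prev_khz > new_khz && safe_to_lower) || prev_khz < new_khz;
}

int main(void)
{
	printf("raise:     %d\n", should_update_dpp_dto(100, 200, false));	/* 1 */
	printf("lower ok:  %d\n", should_update_dpp_dto(200, 100, true));	/* 1 */
	printf("lower bad: %d\n", should_update_dpp_dto(200, 100, false));	/* 0 */
	printf("unchanged: %d\n", should_update_dpp_dto(100, 100, true));	/* 0; old condition gave 1 */
	return 0;
}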
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 7ae4c06232dd..9ef3f7b91a1d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -151,6 +151,12 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
}
+	// workaround: limit dppclk to no less than 100 MHz to avoid underflow when a low-res eDP panel switches to a 4K monitor
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ if (new_clocks->dppclk_khz < 100000)
+ new_clocks->dppclk_khz = 100000;
+ }
+
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
@@ -412,19 +418,19 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
- /* We will not select WM based on dcfclk, so leave it as unconstrained */
- ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- /* fclk wil be used to select WM*/
+ /* We will not select WM based on fclk, so leave it as unconstrained */
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+		/* dcfclk will be used to select WM */
if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
if (i == 0)
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = 0;
else {
/* add 1 to make it non-overlapping with next lvl */
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
}
- ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
} else {
/* unconstrained for memory retraining */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index f1a5d2c6aa37..68c4049cbc2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -400,7 +400,7 @@ static bool acquire(
{
enum gpio_result result;
- if (!is_engine_available(engine))
+ if ((engine == NULL) || !is_engine_available(engine))
return false;
result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index cfbbaffa8654..a444fed94184 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -572,7 +572,6 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
dpp->funcs->dpp_dppclk_control(dpp, false, false);
hubp->power_gated = true;
- dc->optimized_required = false; /* We're powering off, no need to optimize */
hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 0d506d30d6b6..33d0a176841a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -60,6 +60,7 @@
#include "dcn20/dcn20_dccg.h"
#include "dcn21_hubbub.h"
#include "dcn10/dcn10_resource.h"
+#include "dce110/dce110_resource.h"
#include "dcn20/dcn20_dwb.h"
#include "dcn20/dcn20_mmhubbub.h"
@@ -856,6 +857,7 @@ static const struct dc_debug_options debug_defaults_diags = {
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
+ DCN20_CLK_SRC_PLL2,
DCN20_CLK_SRC_TOTAL_DCN21
};
@@ -1718,6 +1720,10 @@ static bool dcn21_resource_construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
index b2f96a101124..7a63cf8e85ed 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -39,21 +39,39 @@
#define SMU_11_0_PP_OVERDRIVE_VERSION 0x0800
#define SMU_11_0_PP_POWERSAVINGCLOCK_VERSION 0x0100
+enum SMU_11_0_ODFEATURE_CAP {
+ SMU_11_0_ODCAP_GFXCLK_LIMITS = 0,
+ SMU_11_0_ODCAP_GFXCLK_CURVE,
+ SMU_11_0_ODCAP_UCLK_MAX,
+ SMU_11_0_ODCAP_POWER_LIMIT,
+ SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT,
+ SMU_11_0_ODCAP_FAN_SPEED_MIN,
+ SMU_11_0_ODCAP_TEMPERATURE_FAN,
+ SMU_11_0_ODCAP_TEMPERATURE_SYSTEM,
+ SMU_11_0_ODCAP_MEMORY_TIMING_TUNE,
+ SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL,
+ SMU_11_0_ODCAP_AUTO_UV_ENGINE,
+ SMU_11_0_ODCAP_AUTO_OC_ENGINE,
+ SMU_11_0_ODCAP_AUTO_OC_MEMORY,
+ SMU_11_0_ODCAP_FAN_CURVE,
+ SMU_11_0_ODCAP_COUNT,
+};
+
enum SMU_11_0_ODFEATURE_ID {
- SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << 0, //GFXCLK Limit feature
- SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << 1, //GFXCLK Curve feature
- SMU_11_0_ODFEATURE_UCLK_MAX = 1 << 2, //UCLK Limit feature
- SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << 3, //Power Limit feature
- SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << 4, //Fan Acoustic RPM feature
- SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << 5, //Minimum Fan Speed feature
- SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << 6, //Fan Target Temperature Limit feature
- SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << 7, //Operating Temperature Limit feature
- SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << 8, //AC Timing Tuning feature
- SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << 9, //Zero RPM feature
- SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << 10, //Auto Under Volt GFXCLK feature
- SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << 11, //Auto Over Clock GFXCLK feature
- SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << 12, //Auto Over Clock MCLK feature
- SMU_11_0_ODFEATURE_FAN_CURVE = 1 << 13, //VICTOR TODO
+ SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_11_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature
+ SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << SMU_11_0_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature
+ SMU_11_0_ODFEATURE_UCLK_MAX = 1 << SMU_11_0_ODCAP_UCLK_MAX, //UCLK Limit feature
+ SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << SMU_11_0_ODCAP_POWER_LIMIT, //Power Limit feature
+ SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature
+ SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << SMU_11_0_ODCAP_FAN_SPEED_MIN, //Minimum Fan Speed feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << SMU_11_0_ODCAP_TEMPERATURE_FAN, //Fan Target Temperature Limit feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << SMU_11_0_ODCAP_TEMPERATURE_SYSTEM, //Operating Temperature Limit feature
+ SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_11_0_ODCAP_MEMORY_TIMING_TUNE, //AC Timing Tuning feature
+ SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL, //Zero RPM feature
+ SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_UV_ENGINE, //Auto Under Volt GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_OC_ENGINE, //Auto Over Clock GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_11_0_ODCAP_AUTO_OC_MEMORY, //Auto Over Clock MCLK feature
+ SMU_11_0_ODFEATURE_FAN_CURVE = 1 << SMU_11_0_ODCAP_FAN_CURVE, //Fan Curve feature
SMU_11_0_ODFEATURE_COUNT = 14,
};
#define SMU_11_0_MAX_ODFEATURE 32 //Maximum Number of OD Features
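Aside: the rework above keeps one enum of bit positions (the *_ODCAP_* values, also used to index od_table->cap[]) and derives each mask from it, so index and mask can never drift apart. A trimmed sketch of the pattern (names shortened, cap values made up):

#include <stdio.h>

enum od_cap {			/* bit positions / array indices */
	ODCAP_GFXCLK_LIMITS = 0,
	ODCAP_GFXCLK_CURVE,
	ODCAP_UCLK_MAX,
	ODCAP_COUNT,
};

enum od_feature {		/* single-bit masks derived from the indices */
	ODFEATURE_GFXCLK_LIMITS = 1 << ODCAP_GFXCLK_LIMITS,
	ODFEATURE_GFXCLK_CURVE  = 1 << ODCAP_GFXCLK_CURVE,
	ODFEATURE_UCLK_MAX      = 1 << ODCAP_UCLK_MAX,
};

int main(void)
{
	unsigned char cap[ODCAP_COUNT] = { 1, 0, 1 };	/* per-cap bytes, as in od_table->cap[] */

	printf("GFXCLK_LIMITS supported: %d (mask 0x%x)\n",
	       cap[ODCAP_GFXCLK_LIMITS], ODFEATURE_GFXCLK_LIMITS);
	return 0;
}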
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 19a9846b730e..0d73a49166af 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -736,9 +736,9 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
-static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
+static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
{
- return od_table->cap[feature];
+ return od_table->cap[cap];
}
static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
@@ -846,7 +846,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_SCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
break;
size += sprintf(buf + size, "OD_SCLK:\n");
size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
@@ -854,7 +854,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_MCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
break;
size += sprintf(buf + size, "OD_MCLK:\n");
size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
@@ -862,7 +862,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_VDDC_CURVE:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
break;
size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
for (i = 0; i < 3; i++) {
@@ -887,7 +887,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
break;
size = sprintf(buf, "%s:\n", "OD_RANGE");
- if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
&min_value, NULL);
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
@@ -896,14 +896,14 @@ static int navi10_print_clk_levels(struct smu_context *smu,
min_value, max_value);
}
- if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
&min_value, &max_value);
size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
min_value, max_value);
}
- if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
&min_value, &max_value);
size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
@@ -2056,7 +2056,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
switch (type) {
case PP_OD_EDIT_SCLK_VDDC_TABLE:
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
pr_warn("GFXCLK_LIMITS not supported!\n");
return -ENOTSUPP;
}
@@ -2102,7 +2102,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
}
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
pr_warn("UCLK_MAX not supported!\n");
return -ENOTSUPP;
}
@@ -2143,7 +2143,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
}
break;
case PP_OD_EDIT_VDDC_CURVE:
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
pr_warn("GFXCLK_CURVE not supported!\n");
return -ENOTSUPP;
}
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 20cdaf3146b8..cce0b1bba591 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3838,7 +3838,8 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
guid = msg->u.resource_stat.guid;
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
+ if (guid)
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
} else {
mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 99769d6c9f84..805fb004c8eb 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3211,7 +3211,7 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
return cea;
}
-static const struct drm_display_mode *cea_mode_for_vic(u8 vic)
+static __always_inline const struct drm_display_mode *cea_mode_for_vic(u8 vic)
{
BUILD_BUG_ON(1 + ARRAY_SIZE(edid_cea_modes_1) - 1 != 127);
BUILD_BUG_ON(193 + ARRAY_SIZE(edid_cea_modes_193) - 1 != 219);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 8beac06e3f10..ef4017a1baba 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -357,14 +357,16 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
panel_fixed_mode->hdisplay + dtd->hfront_porch;
panel_fixed_mode->hsync_end =
panel_fixed_mode->hsync_start + dtd->hsync;
- panel_fixed_mode->htotal = panel_fixed_mode->hsync_end;
+ panel_fixed_mode->htotal =
+ panel_fixed_mode->hdisplay + dtd->hblank;
panel_fixed_mode->vdisplay = dtd->vactive;
panel_fixed_mode->vsync_start =
panel_fixed_mode->vdisplay + dtd->vfront_porch;
panel_fixed_mode->vsync_end =
panel_fixed_mode->vsync_start + dtd->vsync;
- panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end;
+ panel_fixed_mode->vtotal =
+ panel_fixed_mode->vdisplay + dtd->vblank;
panel_fixed_mode->clock = dtd->pixel_clock;
panel_fixed_mode->width_mm = dtd->width_mm;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 19ea842cfd84..064dd99bbc49 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -12366,6 +12366,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
/* Copy parameters to slave plane */
linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
linked_state->color_ctl = plane_state->color_ctl;
+ linked_state->view = plane_state->view;
memcpy(linked_state->color_plane, plane_state->color_plane,
sizeof(linked_state->color_plane));
@@ -14476,37 +14477,23 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
return 0;
}
-static bool intel_cpu_transcoder_needs_modeset(struct intel_atomic_state *state,
- enum transcoder transcoder)
+static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
+ u8 transcoders)
{
- struct intel_crtc_state *new_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
- if (new_crtc_state->cpu_transcoder == transcoder)
- return needs_modeset(new_crtc_state);
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->hw.enable &&
+ transcoders & BIT(new_crtc_state->cpu_transcoder) &&
+ needs_modeset(new_crtc_state))
+ return true;
+ }
return false;
}
-static void
-intel_modeset_synced_crtcs(struct intel_atomic_state *state,
- u8 transcoders)
-{
- struct intel_crtc_state *new_crtc_state;
- struct intel_crtc *crtc;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- if (transcoders & BIT(new_crtc_state->cpu_transcoder)) {
- new_crtc_state->uapi.mode_changed = true;
- new_crtc_state->update_pipe = false;
- }
- }
-}
-
static int
intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
{
@@ -14662,15 +14649,20 @@ static int intel_atomic_check(struct drm_device *dev,
if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
enum transcoder master = new_crtc_state->mst_master_transcoder;
- if (intel_cpu_transcoder_needs_modeset(state, master)) {
+ if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
}
- } else if (is_trans_port_sync_mode(new_crtc_state)) {
+ }
+
+ if (is_trans_port_sync_mode(new_crtc_state)) {
u8 trans = new_crtc_state->sync_mode_slaves_mask |
BIT(new_crtc_state->master_transcoder);
- intel_modeset_synced_crtcs(state, trans);
+ if (intel_cpu_transcoders_need_modeset(state, trans)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 89fb0d90b694..04f953ba8f00 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -384,6 +384,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
+#ifdef CONFIG_ACPI
static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
{
struct i2c_adapter_lookup *lookup = data;
@@ -393,8 +394,7 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
acpi_handle adapter_handle;
acpi_status status;
- if (intel_dsi->i2c_bus_num >= 0 ||
- !i2c_acpi_get_i2c_resource(ares, &sb))
+ if (!i2c_acpi_get_i2c_resource(ares, &sb))
return 1;
if (lookup->slave_addr != sb->slave_address)
@@ -413,14 +413,41 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
return 1;
}
-static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
+static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
+ const u16 slave_addr)
{
struct drm_device *drm_dev = intel_dsi->base.base.dev;
struct device *dev = &drm_dev->pdev->dev;
- struct i2c_adapter *adapter;
struct acpi_device *acpi_dev;
struct list_head resource_list;
struct i2c_adapter_lookup lookup;
+
+ acpi_dev = ACPI_COMPANION(dev);
+ if (acpi_dev) {
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.slave_addr = slave_addr;
+ lookup.intel_dsi = intel_dsi;
+ lookup.dev_handle = acpi_device_handle(acpi_dev);
+
+ INIT_LIST_HEAD(&resource_list);
+ acpi_dev_get_resources(acpi_dev, &resource_list,
+ i2c_adapter_lookup,
+ &lookup);
+ acpi_dev_free_resource_list(&resource_list);
+ }
+}
+#else
+static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
+ const u16 slave_addr)
+{
+}
+#endif
+
+static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ struct drm_device *drm_dev = intel_dsi->base.base.dev;
+ struct device *dev = &drm_dev->pdev->dev;
+ struct i2c_adapter *adapter;
struct i2c_msg msg;
int ret;
u8 vbt_i2c_bus_num = *(data + 2);
@@ -431,20 +458,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
if (intel_dsi->i2c_bus_num < 0) {
intel_dsi->i2c_bus_num = vbt_i2c_bus_num;
-
- acpi_dev = ACPI_COMPANION(dev);
- if (acpi_dev) {
- memset(&lookup, 0, sizeof(lookup));
- lookup.slave_addr = slave_addr;
- lookup.intel_dsi = intel_dsi;
- lookup.dev_handle = acpi_device_handle(acpi_dev);
-
- INIT_LIST_HEAD(&resource_list);
- acpi_dev_get_resources(acpi_dev, &resource_list,
- i2c_adapter_lookup,
- &lookup);
- acpi_dev_free_resource_list(&resource_list);
- }
+ i2c_acpi_find_adapter(intel_dsi, slave_addr);
}
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
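Aside: the #ifdef CONFIG_ACPI split above follows the standard conditional-stub idiom: compile the real helper only when the facility exists, and provide an empty static inline otherwise so callers stay #ifdef-free. A standalone sketch (DEMO_HAVE_ACPI and the function body stand in for CONFIG_ACPI and the real lookup):

#include <stdio.h>

#ifdef DEMO_HAVE_ACPI
static void i2c_acpi_find_adapter(int bus, unsigned short slave_addr)
{
	printf("probing ACPI resources for slave 0x%x on bus %d\n",
	       slave_addr, bus);
}
#else
static inline void i2c_acpi_find_adapter(int bus, unsigned short slave_addr)
{
	/* no ACPI support built in: nothing to do */
	(void)bus;
	(void)slave_addr;
}
#endif

int main(void)
{
	i2c_acpi_find_adapter(0, 0x29);	/* compiles and links either way */
	return 0;
}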
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d5a0f5ae4a8b..60c984e10c4a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1981,9 +1981,20 @@ static int __eb_parse(struct dma_fence_work *work)
pw->trampoline);
}
+static void __eb_parse_release(struct dma_fence_work *work)
+{
+ struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
+
+ if (pw->trampoline)
+ i915_active_release(&pw->trampoline->active);
+ i915_active_release(&pw->shadow->active);
+ i915_active_release(&pw->batch->active);
+}
+
static const struct dma_fence_work_ops eb_parse_ops = {
.name = "eb_parse",
.work = __eb_parse,
+ .release = __eb_parse_release,
};
static int eb_parse_pipeline(struct i915_execbuffer *eb,
@@ -1997,6 +2008,20 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
if (!pw)
return -ENOMEM;
+ err = i915_active_acquire(&eb->batch->active);
+ if (err)
+ goto err_free;
+
+ err = i915_active_acquire(&shadow->active);
+ if (err)
+ goto err_batch;
+
+ if (trampoline) {
+ err = i915_active_acquire(&trampoline->active);
+ if (err)
+ goto err_shadow;
+ }
+
dma_fence_work_init(&pw->base, &eb_parse_ops);
pw->engine = eb->engine;
@@ -2006,7 +2031,9 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
pw->shadow = shadow;
pw->trampoline = trampoline;
- dma_resv_lock(pw->batch->resv, NULL);
+ err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
+ if (err)
+ goto err_trampoline;
err = dma_resv_reserve_shared(pw->batch->resv, 1);
if (err)
@@ -2034,6 +2061,14 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
err_batch_unlock:
dma_resv_unlock(pw->batch->resv);
+err_trampoline:
+ if (trampoline)
+ i915_active_release(&trampoline->active);
+err_shadow:
+ i915_active_release(&shadow->active);
+err_batch:
+ i915_active_release(&eb->batch->active);
+err_free:
kfree(pw);
return err;
}
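Aside: the hunk above is a textbook goto-unwind ladder: acquire batch, shadow and the optional trampoline in order, and on any later failure release them in exactly the reverse order. A standalone sketch of the shape (acquire()/release() are stand-ins for the i915_active_* calls):

#include <stdbool.h>
#include <stdio.h>

static int acquire(const char *what)  { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int parse_pipeline(bool have_trampoline, bool fail_later)
{
	int err;

	err = acquire("batch");
	if (err)
		return err;
	err = acquire("shadow");
	if (err)
		goto err_batch;
	if (have_trampoline) {
		err = acquire("trampoline");
		if (err)
			goto err_shadow;
	}

	if (fail_later) {	/* e.g. dma_resv_lock_interruptible() failing */
		err = -1;
		goto err_trampoline;
	}
	return 0;

err_trampoline:			/* unwind in strict reverse order */
	if (have_trampoline)
		release("trampoline");
err_shadow:
	release("shadow");
err_batch:
	release("batch");
	return err;
}

int main(void)
{
	parse_pipeline(true, true);
	return 0;
}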
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index b9fdac2f9003..0b6a442108de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -455,10 +455,11 @@ out:
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct i915_mmap_offset *mmo;
+ struct i915_mmap_offset *mmo, *mn;
spin_lock(&obj->mmo.lock);
- list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+ rbtree_postorder_for_each_entry_safe(mmo, mn,
+ &obj->mmo.offsets, offset) {
/*
* vma_node_unmap for GTT mmaps handled already in
* __i915_gem_object_release_mmap_gtt
@@ -488,6 +489,67 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
}
static struct i915_mmap_offset *
+lookup_mmo(struct drm_i915_gem_object *obj,
+ enum i915_mmap_type mmap_type)
+{
+ struct rb_node *rb;
+
+ spin_lock(&obj->mmo.lock);
+ rb = obj->mmo.offsets.rb_node;
+ while (rb) {
+ struct i915_mmap_offset *mmo =
+ rb_entry(rb, typeof(*mmo), offset);
+
+ if (mmo->mmap_type == mmap_type) {
+ spin_unlock(&obj->mmo.lock);
+ return mmo;
+ }
+
+ if (mmo->mmap_type < mmap_type)
+ rb = rb->rb_right;
+ else
+ rb = rb->rb_left;
+ }
+ spin_unlock(&obj->mmo.lock);
+
+ return NULL;
+}
+
+static struct i915_mmap_offset *
+insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
+{
+ struct rb_node *rb, **p;
+
+ spin_lock(&obj->mmo.lock);
+ rb = NULL;
+ p = &obj->mmo.offsets.rb_node;
+ while (*p) {
+ struct i915_mmap_offset *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, typeof(*pos), offset);
+
+ if (pos->mmap_type == mmo->mmap_type) {
+ spin_unlock(&obj->mmo.lock);
+ drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node);
+ kfree(mmo);
+ return pos;
+ }
+
+ if (pos->mmap_type < mmo->mmap_type)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&mmo->offset, rb, p);
+ rb_insert_color(&mmo->offset, &obj->mmo.offsets);
+ spin_unlock(&obj->mmo.lock);
+
+ return mmo;
+}
+
+static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type,
struct drm_file *file)
@@ -496,20 +558,22 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
struct i915_mmap_offset *mmo;
int err;
+ mmo = lookup_mmo(obj, mmap_type);
+ if (mmo)
+ goto out;
+
mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
if (!mmo)
return ERR_PTR(-ENOMEM);
mmo->obj = obj;
- mmo->dev = obj->base.dev;
- mmo->file = file;
mmo->mmap_type = mmap_type;
drm_vma_node_reset(&mmo->vma_node);
- err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
- obj->base.size / PAGE_SIZE);
+ err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node, obj->base.size / PAGE_SIZE);
if (likely(!err))
- goto out;
+ goto insert;
/* Attempt to reap some mmap space from dead objects */
err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
@@ -517,19 +581,17 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
goto err;
i915_gem_drain_freed_objects(i915);
- err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
- obj->base.size / PAGE_SIZE);
+ err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node, obj->base.size / PAGE_SIZE);
if (err)
goto err;
+insert:
+ mmo = insert_mmo(obj, mmo);
+ GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
if (file)
drm_vma_node_allow(&mmo->vma_node, file);
-
- spin_lock(&obj->mmo.lock);
- list_add(&mmo->offset, &obj->mmo.offsets);
- spin_unlock(&obj->mmo.lock);
-
return mmo;
err:
@@ -745,60 +807,43 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_vma_offset_node *node;
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
+ struct drm_i915_gem_object *obj = NULL;
struct i915_mmap_offset *mmo = NULL;
- struct drm_gem_object *obj = NULL;
struct file *anon;
if (drm_dev_is_unplugged(dev))
return -ENODEV;
+ rcu_read_lock();
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
vma->vm_pgoff,
vma_pages(vma));
- if (likely(node)) {
- mmo = container_of(node, struct i915_mmap_offset,
- vma_node);
- /*
- * In our dependency chain, the drm_vma_offset_node
- * depends on the validity of the mmo, which depends on
- * the gem object. However the only reference we have
- * at this point is the mmo (as the parent of the node).
- * Try to check if the gem object was at least cleared.
- */
- if (!mmo || !mmo->obj) {
- drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
- return -EINVAL;
- }
+ if (node && drm_vma_node_is_allowed(node, priv)) {
/*
* Skip 0-refcnted objects as it is in the process of being
* destroyed and will be invalid when the vma manager lock
* is released.
*/
- obj = &mmo->obj->base;
- if (!kref_get_unless_zero(&obj->refcount))
- obj = NULL;
+ mmo = container_of(node, struct i915_mmap_offset, vma_node);
+ obj = i915_gem_object_get_rcu(mmo->obj);
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+ rcu_read_unlock();
if (!obj)
- return -EINVAL;
-
- if (!drm_vma_node_is_allowed(node, priv)) {
- drm_gem_object_put_unlocked(obj);
- return -EACCES;
- }
+ return node ? -EACCES : -EINVAL;
- if (i915_gem_object_is_readonly(to_intel_bo(obj))) {
+ if (i915_gem_object_is_readonly(obj)) {
if (vma->vm_flags & VM_WRITE) {
- drm_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINVAL;
}
vma->vm_flags &= ~VM_MAYWRITE;
}
- anon = mmap_singleton(to_i915(obj->dev));
+ anon = mmap_singleton(to_i915(dev));
if (IS_ERR(anon)) {
- drm_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return PTR_ERR(anon);
}
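Aside: lookup_mmo()/insert_mmo() above implement lookup-or-insert on an rbtree keyed by mmap type, where a racing insert returns the node already present and frees the newcomer. A userspace sketch of that contract with a plain binary tree in place of struct rb_node:

#include <stdio.h>
#include <stdlib.h>

struct mmo {
	int mmap_type;		/* search key */
	struct mmo *left, *right;
};

static struct mmo *insert(struct mmo **root, struct mmo *new)
{
	struct mmo **p = root;

	while (*p) {
		if ((*p)->mmap_type == new->mmap_type) {
			free(new);	/* duplicate key: keep the existing node */
			return *p;
		}
		p = (*p)->mmap_type < new->mmap_type ? &(*p)->right : &(*p)->left;
	}
	*p = new;
	return new;
}

int main(void)
{
	struct mmo *root = NULL;
	struct mmo *a = calloc(1, sizeof(*a));
	struct mmo *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	a->mmap_type = b->mmap_type = 1;	/* same key: second insert loses */
	printf("first insert returns a:  %d\n", insert(&root, a) == a);	/* 1 */
	printf("second insert returns a: %d\n", insert(&root, b) == a);	/* 1; b was freed */
	free(a);
	return 0;
}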
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 46bacc82ddc4..35985218bd85 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -63,7 +63,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->mmo.lock);
- INIT_LIST_HEAD(&obj->mmo.offsets);
+ obj->mmo.offsets = RB_ROOT;
init_rcu_head(&obj->rcu);
@@ -100,8 +100,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
+ struct i915_mmap_offset *mmo, *mn;
struct i915_lut_handle *lut, *ln;
- struct i915_mmap_offset *mmo;
LIST_HEAD(close);
i915_gem_object_lock(obj);
@@ -117,14 +117,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
i915_gem_object_unlock(obj);
spin_lock(&obj->mmo.lock);
- list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
- if (mmo->file != file)
- continue;
-
- spin_unlock(&obj->mmo.lock);
+ rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
drm_vma_node_revoke(&mmo->vma_node, file);
- spin_lock(&obj->mmo.lock);
- }
spin_unlock(&obj->mmo.lock);
list_for_each_entry_safe(lut, ln, &close, obj_link) {
@@ -203,12 +197,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
i915_gem_object_release_mmap(obj);
- list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
+ rbtree_postorder_for_each_entry_safe(mmo, mn,
+ &obj->mmo.offsets,
+ offset) {
drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
&mmo->vma_node);
kfree(mmo);
}
- INIT_LIST_HEAD(&obj->mmo.offsets);
+ obj->mmo.offsets = RB_ROOT;
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index db70a3306e59..9c86f2dea947 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -70,14 +70,22 @@ i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
}
static inline struct drm_i915_gem_object *
+i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
+{
+ if (obj && !kref_get_unless_zero(&obj->base.refcount))
+ obj = NULL;
+
+ return obj;
+}
+
+static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
struct drm_i915_gem_object *obj;
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, handle);
- if (obj && !kref_get_unless_zero(&obj->base.refcount))
- obj = NULL;
+ obj = i915_gem_object_get_rcu(obj);
rcu_read_unlock();
return obj;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 88e268633fdc..f64ad77e6b1e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -71,13 +71,11 @@ enum i915_mmap_type {
};
struct i915_mmap_offset {
- struct drm_device *dev;
struct drm_vma_offset_node vma_node;
struct drm_i915_gem_object *obj;
- struct drm_file *file;
enum i915_mmap_type mmap_type;
- struct list_head offset;
+ struct rb_node offset;
};
struct drm_i915_gem_object {
@@ -137,7 +135,7 @@ struct drm_i915_gem_object {
struct {
spinlock_t lock; /* Protects access to mmo offsets */
- struct list_head offsets;
+ struct rb_root offsets;
} mmo;
I915_SELFTEST_DECLARE(struct list_head st_link);
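Editor's note: insert_mmo()/lookup_mmo() used in the mmap-offset hunk earlier are not visible in this excerpt; given the rb_node conversion here, lookup_mmo() most plausibly walks the tree keyed on mmap_type under obj->mmo.lock, roughly as below (a hedged reconstruction, not quoted from the patch):

static struct i915_mmap_offset *
lookup_mmo(struct drm_i915_gem_object *obj, enum i915_mmap_type mmap_type)
{
	struct rb_node *rb;

	spin_lock(&obj->mmo.lock);
	rb = obj->mmo.offsets.rb_node;
	while (rb) {
		struct i915_mmap_offset *mmo =
			rb_entry(rb, typeof(*mmo), offset);

		if (mmo->mmap_type == mmap_type) {
			spin_unlock(&obj->mmo.lock);
			return mmo;	/* already published for this type */
		}

		if (mmo->mmap_type < mmap_type)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}
	spin_unlock(&obj->mmo.lock);

	return NULL;
}

insert_mmo() would take the same lock and key for the usual rb_link_node()/rb_insert_color() insertion, returning any node that won a racing insert, which is why the caller re-checks with lookup_mmo() afterwards.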
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 23137b2a8689..57e8a051ddc2 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -67,21 +67,18 @@ static int intel_context_active_acquire(struct intel_context *ce)
{
int err;
- err = i915_active_acquire(&ce->active);
- if (err)
- return err;
+ __i915_active_acquire(&ce->active);
+
+ if (intel_context_is_barrier(ce))
+ return 0;
/* Preallocate tracking nodes */
- if (!intel_context_is_barrier(ce)) {
- err = i915_active_acquire_preallocate_barrier(&ce->active,
- ce->engine);
- if (err) {
- i915_active_release(&ce->active);
- return err;
- }
- }
+ err = i915_active_acquire_preallocate_barrier(&ce->active,
+ ce->engine);
+ if (err)
+ i915_active_release(&ce->active);
- return 0;
+ return err;
}
static void intel_context_active_release(struct intel_context *ce)
@@ -101,13 +98,19 @@ int __intel_context_do_pin(struct intel_context *ce)
return err;
}
- if (mutex_lock_interruptible(&ce->pin_mutex))
- return -EINTR;
+ err = i915_active_acquire(&ce->active);
+ if (err)
+ return err;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex)) {
+ err = -EINTR;
+ goto out_release;
+ }
- if (likely(!atomic_read(&ce->pin_count))) {
+ if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
err = intel_context_active_acquire(ce);
if (unlikely(err))
- goto err;
+ goto out_unlock;
err = ce->ops->pin(ce);
if (unlikely(err))
@@ -117,18 +120,19 @@ int __intel_context_do_pin(struct intel_context *ce)
ce->ring->head, ce->ring->tail);
smp_mb__before_atomic(); /* flush pin before it is visible */
+ atomic_inc(&ce->pin_count);
}
- atomic_inc(&ce->pin_count);
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
-
- mutex_unlock(&ce->pin_mutex);
- return 0;
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ goto out_unlock;
err_active:
intel_context_active_release(ce);
-err:
+out_unlock:
mutex_unlock(&ce->pin_mutex);
+out_release:
+ i915_active_release(&ce->active);
return err;
}
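Editor's note: the reworked pin path above is the classic pin-count fast path: atomic_add_unless(&pin_count, 1, 0) takes an extra pin only when one already exists, so only the 0 -> 1 transition pays for the mutex and the expensive setup. Reduced to its skeleton (struct and helper names are illustrative):

#include <linux/atomic.h>
#include <linux/mutex.h>

struct pinnable {
	struct mutex mutex;
	atomic_t pin_count;
};

static int first_pin(struct pinnable *p) { return 0; /* expensive 0 -> 1 setup */ }

static int do_pin(struct pinnable *p)
{
	int err = 0;

	/* Fast path: already pinned, just bump the count */
	if (atomic_add_unless(&p->pin_count, 1, 0))
		return 0;

	mutex_lock(&p->mutex);
	if (!atomic_add_unless(&p->pin_count, 1, 0)) {
		err = first_pin(p);
		if (!err) {
			smp_mb__before_atomic(); /* publish setup before the pin is seen */
			atomic_inc(&p->pin_count);
		}
	}
	mutex_unlock(&p->mutex);

	return err;
}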
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index f451ef376548..06ff7695fa29 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -671,6 +671,7 @@ void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{
INIT_LIST_HEAD(&engine->active.requests);
+ INIT_LIST_HEAD(&engine->active.hold);
spin_lock_init(&engine->active.lock);
lockdep_set_subclass(&engine->active.lock, subclass);
@@ -1422,6 +1423,17 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
}
}
+static unsigned long list_count(struct list_head *list)
+{
+ struct list_head *pos;
+ unsigned long count = 0;
+
+ list_for_each(pos, list)
+ count++;
+
+ return count;
+}
+
void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
@@ -1491,6 +1503,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
}
}
+ drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
spin_unlock_irqrestore(&engine->active.lock, flags);
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 350da59e605b..92be41a6903c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -295,6 +295,7 @@ struct intel_engine_cs {
struct {
spinlock_t lock;
struct list_head requests;
+ struct list_head hold; /* ready requests, but on hold */
} active;
struct llist_head barrier_tasks;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 0cf0f6fae675..a13a8c4b65ab 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
list_move(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
active = rq;
} else {
struct intel_engine_cs *owner = rq->context->engine;
@@ -1535,7 +1537,8 @@ static bool can_merge_rq(const struct i915_request *prev,
return true;
if (unlikely((prev->fence.flags ^ next->fence.flags) &
- (I915_FENCE_FLAG_NOPREEMPT | I915_FENCE_FLAG_SENTINEL)))
+ (BIT(I915_FENCE_FLAG_NOPREEMPT) |
+ BIT(I915_FENCE_FLAG_SENTINEL))))
return false;
if (!can_merge_ctx(prev->context, next->context))
@@ -1632,8 +1635,8 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
!i915_request_completed(rq));
GEM_BUG_ON(i915_request_is_active(w));
- if (list_empty(&w->sched.link))
- continue; /* Not yet submitted; unready */
+ if (!i915_request_is_ready(w))
+ continue;
if (rq_prio(w) < rq_prio(rq))
continue;
@@ -2351,6 +2354,310 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
}
}
+static void __execlists_hold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ if (i915_request_is_active(rq))
+ __i915_request_unsubmit(rq);
+
+ RQ_TRACE(rq, "on hold\n");
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+ i915_request_set_hold(rq);
+
+ list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (i915_request_completed(w))
+ continue;
+
+ if (i915_request_on_hold(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
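Editor's note: __execlists_hold() above (and __execlists_unhold() below) traverse the whole dependency subtree without recursion: each node's eligible waiters are spliced onto a local list, and the loop pops the next entry until the list drains. The bare traversal, assuming every node's link has been INIT_LIST_HEAD()-initialised (all names here are illustrative):

#include <linux/list.h>

struct node {
	struct list_head link;		/* entry in the local work list */
	struct list_head children;	/* head of this node's child list */
	struct list_head sibling;	/* entry in the parent's child list */
};

static void mark(struct node *n) { /* act on the node, e.g. set a flag */ }
static bool eligible(struct node *n) { return true; /* placeholder filter */ }

static void propagate(struct node *n)
{
	LIST_HEAD(list);

	do {
		struct node *child;

		list_del_init(&n->link);	/* leave the work list once processed */
		mark(n);

		/* queue eligible children instead of recursing */
		list_for_each_entry(child, &n->children, sibling)
			if (eligible(child))
				list_move_tail(&child->link, &list);

		n = list_first_entry_or_null(&list, typeof(*n), link);
	} while (n);
}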
+
+static bool execlists_hold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->active.lock);
+
+ if (i915_request_completed(rq)) { /* too late! */
+ rq = NULL;
+ goto unlock;
+ }
+
+ if (rq->engine != engine) { /* preempted virtual engine */
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+
+ /*
+ * intel_context_inflight() is only protected by virtue
+ * of process_csb() being called only by the tasklet (or
+ * directly from inside reset while the tasklet is suspended).
+ * Assert that neither of those are allowed to run while we
+ * poke at the request queues.
+ */
+ GEM_BUG_ON(!reset_in_progress(&engine->execlists));
+
+ /*
+ * An unsubmitted request along a virtual engine will
+ * remain on the active (this) engine until we are able
+ * to process the context switch away (and so mark the
+ * context as no longer in flight). That cannot have happened
+ * yet, otherwise we would not be hanging!
+ */
+ spin_lock(&ve->base.active.lock);
+ GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
+ GEM_BUG_ON(ve->request != rq);
+ ve->request = NULL;
+ spin_unlock(&ve->base.active.lock);
+ i915_request_put(rq);
+
+ rq->engine = engine;
+ }
+
+ /*
+ * Transfer this request onto the hold queue to prevent it
+ * being resubmitted to HW (and potentially completed) before we have
+ * released it. Since we may have already submitted following
+ * requests, we need to remove those as well.
+ */
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ GEM_BUG_ON(rq->engine != engine);
+ __execlists_hold(rq);
+
+unlock:
+ spin_unlock_irq(&engine->active.lock);
+ return rq;
+}
+
+static bool hold_request(const struct i915_request *rq)
+{
+ struct i915_dependency *p;
+
+ /*
+ * If one of our ancestors is on hold, we must also be on hold,
+ * otherwise we will bypass it and execute before it.
+ */
+ list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
+ const struct i915_request *s =
+ container_of(p->signaler, typeof(*s), sched);
+
+ if (s->engine != rq->engine)
+ continue;
+
+ if (i915_request_on_hold(s))
+ return true;
+ }
+
+ return false;
+}
+
+static void __execlists_unhold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+
+ i915_request_clear_hold(rq);
+ list_move_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(rq->engine,
+ rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ RQ_TRACE(rq, "hold release\n");
+
+ /* Also release any children on this engine that are ready */
+ list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_on_hold(w))
+ continue;
+
+ /* Check that no other parents are also on hold */
+ if (hold_request(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void execlists_unhold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->active.lock);
+
+ /*
+ * Move this request back to the priority queue, and all of its
+ * children and grandchildren that were suspended along with it.
+ */
+ __execlists_unhold(rq);
+
+ if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
+ engine->execlists.queue_priority_hint = rq_prio(rq);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+
+ spin_unlock_irq(&engine->active.lock);
+}
+
+struct execlists_capture {
+ struct work_struct work;
+ struct i915_request *rq;
+ struct i915_gpu_coredump *error;
+};
+
+static void execlists_capture_work(struct work_struct *work)
+{
+ struct execlists_capture *cap = container_of(work, typeof(*cap), work);
+ const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ struct intel_engine_cs *engine = cap->rq->engine;
+ struct intel_gt_coredump *gt = cap->error->gt;
+ struct intel_engine_capture_vma *vma;
+
+ /* Compress all the objects attached to the request, slow! */
+ vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
+ if (vma) {
+ struct i915_vma_compress *compress =
+ i915_vma_capture_prepare(gt);
+
+ intel_engine_coredump_add_vma(gt->engine, vma, compress);
+ i915_vma_capture_finish(gt, compress);
+ }
+
+ gt->simulated = gt->engine->simulated;
+ cap->error->simulated = gt->simulated;
+
+ /* Publish the error state, and announce it to the world */
+ i915_error_state_store(cap->error);
+ i915_gpu_coredump_put(cap->error);
+
+ /* Return this request and all that depend upon it for signaling */
+ execlists_unhold(engine, cap->rq);
+ i915_request_put(cap->rq);
+
+ kfree(cap);
+}
+
+static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
+{
+ const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+ struct execlists_capture *cap;
+
+ cap = kmalloc(sizeof(*cap), gfp);
+ if (!cap)
+ return NULL;
+
+ cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
+ if (!cap->error)
+ goto err_cap;
+
+ cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
+ if (!cap->error->gt)
+ goto err_gpu;
+
+ cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
+ if (!cap->error->gt->engine)
+ goto err_gt;
+
+ return cap;
+
+err_gt:
+ kfree(cap->error->gt);
+err_gpu:
+ kfree(cap->error);
+err_cap:
+ kfree(cap);
+ return NULL;
+}
+
+static bool execlists_capture(struct intel_engine_cs *engine)
+{
+ struct execlists_capture *cap;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
+ return true;
+
+ /*
+ * We need to _quickly_ capture the engine state before we reset.
+ * We are inside an atomic section (softirq) here and we are delaying
+ * the forced preemption event.
+ */
+ cap = capture_regs(engine);
+ if (!cap)
+ return true;
+
+ cap->rq = execlists_active(&engine->execlists);
+ GEM_BUG_ON(!cap->rq);
+
+ rcu_read_lock();
+ cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+ cap->rq = i915_request_get_rcu(cap->rq);
+ rcu_read_unlock();
+ if (!cap->rq)
+ goto err_free;
+
+ /*
+ * Remove the request from the execlists queue, and take ownership
+ * of the request. We pass it to our worker who will _slowly_ compress
+ * all the pages the _user_ requested for debugging their batch, after
+ * which we return it to the queue for signaling.
+ *
+ * By removing them from the execlists queue, we also remove the
+ * requests from being processed by __unwind_incomplete_requests()
+ * during the intel_engine_reset(), and so they will *not* be replayed
+ * afterwards.
+ *
+ * Note that because we have not yet reset the engine at this point,
+ * it is possible that the request we have identified as being
+ * guilty did in fact complete, and we will then hit an arbitration
+ * point allowing the outstanding preemption to succeed. The likelihood
+ * of that is very low (as capturing of the engine registers should be
+ * fast enough to run inside an irq-off atomic section!), so we will
+ * simply hold that request accountable for being non-preemptible
+ * long enough to force the reset.
+ */
+ if (!execlists_hold(engine, cap->rq))
+ goto err_rq;
+
+ INIT_WORK(&cap->work, execlists_capture_work);
+ schedule_work(&cap->work);
+ return true;
+
+err_rq:
+ i915_request_put(cap->rq);
+err_free:
+ i915_gpu_coredump_put(cap->error);
+ kfree(cap);
+ return false;
+}
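Editor's note: execlists_capture() is deliberately two-phase: everything done from the softirq uses GFP_ATOMIC and only snapshots registers, while the slow page compression is deferred to a workqueue where GFP_KERNEL and sleeping are allowed. The deferral pattern in isolation (capture_fast()/capture_slow() are placeholders for the cheap and expensive halves):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct capture {
	struct work_struct work;
	void *snapshot;
};

static void *capture_fast(gfp_t gfp) { return kzalloc(64, gfp); }	/* quick register grab */
static void capture_slow(void *snapshot) { }				/* heavy, may sleep */

static void capture_work(struct work_struct *work)
{
	struct capture *cap = container_of(work, typeof(*cap), work);

	capture_slow(cap->snapshot);
	kfree(cap->snapshot);
	kfree(cap);
}

static bool capture_begin(void)	/* called from atomic (softirq) context */
{
	struct capture *cap;

	cap = kmalloc(sizeof(*cap), GFP_ATOMIC | __GFP_NOWARN);
	if (!cap)
		return false;

	cap->snapshot = capture_fast(GFP_ATOMIC | __GFP_NOWARN);
	if (!cap->snapshot) {
		kfree(cap);
		return false;
	}

	INIT_WORK(&cap->work, capture_work);
	schedule_work(&cap->work);
	return true;
}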
+
static noinline void preempt_reset(struct intel_engine_cs *engine)
{
const unsigned int bit = I915_RESET_ENGINE + engine->id;
@@ -2368,7 +2675,12 @@ static noinline void preempt_reset(struct intel_engine_cs *engine)
ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
READ_ONCE(engine->props.preempt_timeout_ms),
jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
- intel_engine_reset(engine, "preemption time out");
+
+ ring_set_paused(engine, 1); /* Freeze the current request in place */
+ if (execlists_capture(engine))
+ intel_engine_reset(engine, "preemption timeout");
+ else
+ ring_set_paused(engine, 0);
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(bit, lock);
@@ -2430,11 +2742,12 @@ static void execlists_preempt(struct timer_list *timer)
}
static void queue_request(struct intel_engine_cs *engine,
- struct i915_sched_node *node,
- int prio)
+ struct i915_request *rq)
{
- GEM_BUG_ON(!list_empty(&node->link));
- list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -2462,6 +2775,13 @@ static void submit_queue(struct intel_engine_cs *engine,
__submit_queue_imm(engine);
}
+static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ return !list_empty(&engine->active.hold) && hold_request(rq);
+}
+
static void execlists_submit_request(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -2470,12 +2790,17 @@ static void execlists_submit_request(struct i915_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->active.lock, flags);
- queue_request(engine, &request->sched, rq_prio(request));
+ if (unlikely(ancestor_on_hold(engine, request))) {
+ list_add_tail(&request->sched.link, &engine->active.hold);
+ i915_request_set_hold(request);
+ } else {
+ queue_request(engine, request);
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
- GEM_BUG_ON(list_empty(&request->sched.link));
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&request->sched.link));
- submit_queue(engine, request);
+ submit_queue(engine, request);
+ }
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -2531,7 +2856,6 @@ static void execlists_context_unpin(struct intel_context *ce)
ce->engine);
i915_gem_object_unpin_map(ce->state->obj);
- intel_ring_reset(ce->ring, ce->ring->tail);
}
static void
@@ -3325,6 +3649,10 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
i915_priolist_free(p);
}
+ /* On-hold requests will be flushed to the timeline upon their release */
+ list_for_each_entry(rq, &engine->active.hold, sched.link)
+ mark_eio(rq);
+
/* Cancel all attached virtual engines */
while ((rb = rb_first_cached(&execlists->virtual))) {
struct virtual_engine *ve =
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index a560b7eee2cd..f2806381733f 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -59,11 +59,26 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
ring->vaddr = (void *)(ring + 1);
atomic_set(&ring->pin_count, 1);
+ ring->vma = i915_vma_alloc();
+ if (!ring->vma) {
+ kfree(ring);
+ return NULL;
+ }
+ i915_active_init(&ring->vma->active, NULL, NULL);
+
intel_ring_update_space(ring);
return ring;
}
+static void mock_ring_free(struct intel_ring *ring)
+{
+ i915_active_fini(&ring->vma->active);
+ i915_vma_free(ring->vma);
+
+ kfree(ring);
+}
+
static struct i915_request *first_request(struct mock_engine *engine)
{
return list_first_entry_or_null(&engine->hw_queue,
@@ -121,7 +136,7 @@ static void mock_context_destroy(struct kref *ref)
GEM_BUG_ON(intel_context_is_pinned(ce));
if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
- kfree(ce->ring);
+ mock_ring_free(ce->ring);
mock_timeline_unpin(ce->timeline);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 15cda024e3e4..65718ca2326e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -285,6 +285,107 @@ static int live_unlite_preempt(void *arg)
return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
}
+static int live_hold_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ unsigned long heartbeat;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ engine_heartbeat_disable(engine, &heartbeat);
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ /* We have our request executing, now remove it and reset */
+
+ if (test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags)) {
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ i915_request_get(rq);
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ intel_engine_reset(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags);
+
+ /* Check that we do not resubmit the held request */
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ i915_request_put(rq);
+ err = -EIO;
+ goto out;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+
+out:
+ engine_heartbeat_enable(engine, heartbeat);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
static int
emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
{
@@ -3309,12 +3410,168 @@ static int live_virtual_bond(void *arg)
return 0;
}
+static int reset_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct intel_engine_cs *engine;
+ struct intel_context *ve;
+ unsigned long *heartbeat;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ heartbeat = kmalloc_array(nsibling, sizeof(*heartbeat), GFP_KERNEL);
+ if (!heartbeat)
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin, gt)) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_spin;
+ }
+
+ for (n = 0; n < nsibling; n++)
+ engine_heartbeat_disable(siblings[n], &heartbeat[n]);
+
+ rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out_heartbeat;
+ }
+
+ engine = rq->engine;
+ GEM_BUG_ON(engine == ve->engine);
+
+ /* Take ownership of the reset and tasklet */
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out_heartbeat;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Fake a preemption event; one that fails, of course */
+ spin_lock_irq(&engine->active.lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->active.lock);
+ GEM_BUG_ON(rq->engine != ve->engine);
+
+ /* Reset the engine while keeping our active request on hold */
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ intel_engine_reset(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ /* Release our grasp on the engine, letting CS flow again */
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
+
+ /* Check that we do not resubmit the held request */
+ i915_request_get(rq);
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_rq;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_heartbeat:
+ for (n = 0; n < nsibling; n++)
+ engine_heartbeat_enable(siblings[n], heartbeat[n]);
+
+ intel_context_put(ve);
+out_spin:
+ igt_spinner_fini(&spin);
+out_free:
+ kfree(heartbeat);
+ return err;
+}
+
+static int live_virtual_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+
+ /*
+ * Check that we handle a reset event within a virtual engine.
+ * Only the physical engine is reset, but we have to check the flow
+ * of the virtual requests around the reset, and make sure it is not
+ * forgotten.
+ */
+
+ if (USES_GUC_SUBMISSION(gt->i915))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ siblings[nsibling++] = gt->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ err = reset_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
SUBTEST(live_unlite_switch),
SUBTEST(live_unlite_preempt),
+ SUBTEST(live_hold_reset),
SUBTEST(live_timeslice_preempt),
SUBTEST(live_timeslice_queue),
SUBTEST(live_busywait_preempt),
@@ -3333,6 +3590,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_virtual_mask),
SUBTEST(live_virtual_preserved),
SUBTEST(live_virtual_bond),
+ SUBTEST(live_virtual_reset),
};
if (!HAS_EXECLISTS(i915))
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 049775e8e350..b0c1fda32977 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -146,7 +146,7 @@ void intel_gvt_free_firmware(struct intel_gvt *gvt)
clean_firmware_sysfs(gvt);
kfree(gvt->firmware.cfg_space);
- kfree(gvt->firmware.mmio);
+ vfree(gvt->firmware.mmio);
}
static int verify_firmware(struct intel_gvt *gvt,
@@ -229,7 +229,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
firmware->cfg_space = mem;
- mem = kmalloc(info->mmio_size, GFP_KERNEL);
+ mem = vmalloc(info->mmio_size);
if (!mem) {
kfree(path);
kfree(firmware->cfg_space);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 34cb404ba4b7..4a4828074cb7 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1956,7 +1956,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
if (mm->type == INTEL_GVT_MM_PPGTT) {
list_del(&mm->ppgtt_mm.list);
+
+ mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
list_del(&mm->ppgtt_mm.lru_list);
+ mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
+
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index f3da5c06f331..b0a499753526 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -416,13 +416,15 @@ int i915_active_acquire(struct i915_active *ref)
if (err)
return err;
- if (!atomic_read(&ref->count) && ref->active)
- err = ref->active(ref);
- if (!err) {
- spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */
- debug_active_activate(ref);
- atomic_inc(&ref->count);
- spin_unlock_irq(&ref->tree_lock);
+ if (likely(!i915_active_acquire_if_busy(ref))) {
+ if (ref->active)
+ err = ref->active(ref);
+ if (!err) {
+ spin_lock_irq(&ref->tree_lock); /* __active_retire() */
+ debug_active_activate(ref);
+ atomic_inc(&ref->count);
+ spin_unlock_irq(&ref->tree_lock);
+ }
}
mutex_unlock(&ref->mutex);
@@ -605,7 +607,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
intel_engine_mask_t tmp, mask = engine->mask;
- struct llist_node *pos = NULL, *next;
+ struct llist_node *first = NULL, *last = NULL;
struct intel_gt *gt = engine->gt;
int err;
@@ -623,6 +625,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
*/
for_each_engine_masked(engine, gt, mask, tmp) {
u64 idx = engine->kernel_context->timeline->fence_context;
+ struct llist_node *prev = first;
struct active_node *node;
node = reuse_idle_barrier(ref, idx);
@@ -656,23 +659,23 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
GEM_BUG_ON(barrier_to_engine(node) != engine);
- next = barrier_to_ll(node);
- next->next = pos;
- if (!pos)
- pos = next;
+ first = barrier_to_ll(node);
+ first->next = prev;
+ if (!last)
+ last = first;
intel_engine_pm_get(engine);
}
GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
- llist_add_batch(next, pos, &ref->preallocated_barriers);
+ llist_add_batch(first, last, &ref->preallocated_barriers);
return 0;
unwind:
- while (pos) {
- struct active_node *node = barrier_from_ll(pos);
+ while (first) {
+ struct active_node *node = barrier_from_ll(first);
- pos = pos->next;
+ first = first->next;
atomic_dec(&ref->count);
intel_engine_pm_put(barrier_to_engine(node));
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index b571f675c795..51e1e854ca55 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -188,6 +188,12 @@ int i915_active_acquire(struct i915_active *ref);
bool i915_active_acquire_if_busy(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
+static inline void __i915_active_acquire(struct i915_active *ref)
+{
+ GEM_BUG_ON(!atomic_read(&ref->count));
+ atomic_inc(&ref->count);
+}
+
static inline bool
i915_active_is_idle(const struct i915_active *ref)
{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 94f993e4c12f..c2de2f45b459 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -265,7 +265,10 @@ i915_gem_dumb_create(struct drm_file *file,
DRM_FORMAT_MOD_LINEAR))
args->pitch = ALIGN(args->pitch, 4096);
- args->size = args->pitch * args->height;
+ if (args->pitch < args->width)
+ return -EINVAL;
+
+ args->size = mul_u32_u32(args->pitch, args->height);
mem_type = INTEL_MEMORY_SYSTEM;
if (HAS_LMEM(to_i915(dev)))
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 4c1836f0a991..594341e27a47 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1681,7 +1681,7 @@ static const char *error_msg(struct i915_gpu_coredump *error)
"GPU HANG: ecode %d:%x:%08x",
INTEL_GEN(error->i915), engines,
generate_ecode(first));
- if (first) {
+ if (first && first->context.pid) {
/* Just show the first executing process, more is confusing */
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 9109004956bd..e4a6afed3bbf 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -314,8 +314,11 @@ i915_vma_capture_finish(struct intel_gt_coredump *gt,
}
static inline void
-i915_error_state_store(struct drm_i915_private *i915,
- struct i915_gpu_coredump *error)
+i915_error_state_store(struct i915_gpu_coredump *error)
+{
+}
+
+static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
}
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 28a82c849bac..ec0299490dd4 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -637,8 +637,10 @@ static void i915_pmu_enable(struct perf_event *event)
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
+ intel_wakeref_t wakeref;
unsigned long flags;
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
spin_lock_irqsave(&pmu->lock, flags);
/*
@@ -648,6 +650,14 @@ static void i915_pmu_enable(struct perf_event *event)
BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(pmu->enable_count[bit] == ~0);
+
+ if (pmu->enable_count[bit] == 0 &&
+ config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
+ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ pmu->sleep_last = ktime_get();
+ }
+
pmu->enable |= BIT_ULL(bit);
pmu->enable_count[bit]++;
@@ -688,6 +698,8 @@ static void i915_pmu_enable(struct perf_event *event)
* an existing non-zero value.
*/
local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static void i915_pmu_disable(struct perf_event *event)
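Editor's note: besides holding a wakeref so __get_rc6() reads live hardware counters, the hunk above shows first-user initialisation under a refcount: only the 0 -> 1 transition of enable_count re-baselines the RC6 sample, so an already-enabled counter is never disturbed. Condensed (names are illustrative):

#include <linux/spinlock.h>
#include <linux/types.h>

struct counter {
	spinlock_t lock;
	unsigned int enable_count;
	u64 baseline;
};

static void counter_enable(struct counter *c, u64 hw_now)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (c->enable_count++ == 0)
		c->baseline = hw_now;	/* only the first user sets the baseline */
	spin_unlock_irqrestore(&c->lock, flags);
}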
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index be185886e4fc..78a5f5d3c070 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -221,6 +221,8 @@ static void remove_from_engine(struct i915_request *rq)
locked = engine;
}
list_del_init(&rq->sched.link);
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
spin_unlock_irq(&locked->active.lock);
}
@@ -408,8 +410,10 @@ bool __i915_request_submit(struct i915_request *request)
xfer: /* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
- if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
+ if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
list_move_tail(&request->sched.link, &engine->active.requests);
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+ }
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 031433691a06..f57eadcf3583 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -71,6 +71,18 @@ enum {
I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
/*
+ * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
+ *
+ * Using the scheduler, when a request is ready for execution it is put
+ * into the priority queue, and removed from that queue when transferred
+ * to the HW runlists. We want to track its membership within the
+ * priority queue so that we can easily check before rescheduling.
+ *
+ * See i915_request_in_priority_queue()
+ */
+ I915_FENCE_FLAG_PQUEUE,
+
+ /*
* I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
*
* Internal bookkeeping used by the breadcrumb code to track when
@@ -79,6 +91,13 @@ enum {
I915_FENCE_FLAG_SIGNAL,
/*
+ * I915_FENCE_FLAG_HOLD - this request is currently on hold
+ *
+ * This request has been suspended, pending an ongoing investigation.
+ */
+ I915_FENCE_FLAG_HOLD,
+
+ /*
* I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
*
* The execution of some requests should not be interrupted. This is
@@ -361,6 +380,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}
+static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
+{
+ return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
/**
* Returns true if seq1 is later than seq2.
*/
@@ -454,6 +478,27 @@ static inline bool i915_request_is_running(const struct i915_request *rq)
return __i915_request_has_started(rq);
}
+/**
+ * i915_request_is_ready - check if the request is ready for execution
+ * @rq: the request
+ *
+ * Upon construction, the request is instructed to wait upon various
+ * signals before it is ready to be executed by the HW. That is, we do
+ * not want to start execution and read data before it is written. In practice,
+ * this is controlled with a mixture of interrupts and semaphores. Once
+ * the submit fence is completed, the backend scheduler will place the
+ * request into its queue and from there submit it for execution. So we
+ * can detect when a request is eligible for execution (and is under control
+ * of the scheduler) by querying where it is in any of the scheduler's lists.
+ *
+ * Returns true if the request is ready for execution (it may be inflight),
+ * false otherwise.
+ */
+static inline bool i915_request_is_ready(const struct i915_request *rq)
+{
+ return !list_empty(&rq->sched.link);
+}
+
static inline bool i915_request_completed(const struct i915_request *rq)
{
if (i915_request_signaled(rq))
@@ -483,6 +528,21 @@ static inline bool i915_request_has_sentinel(const struct i915_request *rq)
return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}
+static inline bool i915_request_on_hold(const struct i915_request *rq)
+{
+ return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
+}
+
+static inline void i915_request_set_hold(struct i915_request *rq)
+{
+ set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+}
+
+static inline void i915_request_clear_hold(struct i915_request *rq)
+{
+ clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+}
+
static inline struct intel_timeline *
i915_request_timeline(struct i915_request *rq)
{
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index bf87c70bfdd9..5d96cfba40f8 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -326,20 +326,18 @@ static void __i915_schedule(struct i915_sched_node *node,
node->attr.priority = prio;
- if (list_empty(&node->link)) {
- /*
- * If the request is not in the priolist queue because
- * it is not yet runnable, then it doesn't contribute
- * to our preemption decisions. On the other hand,
- * if the request is on the HW, it too is not in the
- * queue; but in that case we may still need to reorder
- * the inflight requests.
- */
+ /*
+ * Once the request is ready, it will be placed into the
+ * priority lists and then onto the HW runlist. Before the
+ * request is ready, it does not contribute to our preemption
+ * decisions and we can safely ignore it; both the request and
+ * any preemption it requires will be dealt with upon submission.
+ * See engine->submit_request()
+ */
+ if (list_empty(&node->link))
continue;
- }
- if (!intel_engine_is_virtual(engine) &&
- !i915_request_is_active(node_to_request(node))) {
+ if (i915_request_in_priority_queue(node_to_request(node))) {
if (!cache.priolist)
cache.priolist =
i915_sched_lookup_priolist(engine,
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 17d7c525ea5c..4ff380770b32 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1202,16 +1202,26 @@ int __i915_vma_unbind(struct i915_vma *vma)
if (ret)
return ret;
- GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EAGAIN;
}
- GEM_BUG_ON(i915_vma_is_active(vma));
+ /*
+ * After confirming that no one else is pinning this vma, wait for
+ * any laggards who may have crept in during the wait (through
+ * a residual pin skipping the vm->mutex) to complete.
+ */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+
if (!drm_mm_node_allocated(&vma->node))
return 0;
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
+ GEM_BUG_ON(i915_vma_is_active(vma));
+
if (i915_vma_is_map_and_fenceable(vma)) {
/*
* Check that we have flushed all writes through the GGTT
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c26219c7a49f..e4b750b0c2d3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -441,6 +441,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
+ if (!dev->dma_parms) {
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+ }
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
msm_gem_shrinker_init(ddev);
switch (get_mdp_ver(pdev)) {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 6da59f476aba..b7a618db3ee2 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -166,6 +166,7 @@ panfrost_lookup_bos(struct drm_device *dev,
break;
}
+ atomic_inc(&bo->gpu_usecount);
job->mappings[i] = mapping;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index ca1bc9019600..b3517ff9630c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -30,6 +30,12 @@ struct panfrost_gem_object {
struct mutex lock;
} mappings;
+ /*
+ * Count the number of jobs referencing this BO so we don't let the
+ * shrinker reclaim this object prematurely.
+ */
+ atomic_t gpu_usecount;
+
bool noexec :1;
bool is_heap :1;
};
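Editor's note: gpu_usecount is a plain atomic in-use count: submission increments it per mapped BO, job teardown decrements it, and the shrinker refuses to purge while it is non-zero. The cooperating sites, reduced to a sketch (the real driver additionally serialises against submission through its own locks):

#include <linux/atomic.h>

struct buffer {
	atomic_t gpu_usecount;	/* jobs currently referencing this buffer */
};

static void job_take(struct buffer *bo)
{
	atomic_inc(&bo->gpu_usecount);	/* at job setup, per referenced BO */
}

static void job_release(struct buffer *bo)
{
	atomic_dec(&bo->gpu_usecount);	/* at job cleanup */
}

static bool can_purge(const struct buffer *bo)
{
	return atomic_read(&bo->gpu_usecount) == 0;	/* shrinker-side check */
}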
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index f5dd7b29bc95..288e46c40673 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -41,6 +41,9 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ if (atomic_read(&bo->gpu_usecount))
+ return false;
+
if (!mutex_trylock(&shmem->pages_lock))
return false;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 7c36ec675b73..7157dfd7dea3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -269,8 +269,13 @@ static void panfrost_job_cleanup(struct kref *ref)
dma_fence_put(job->render_done_fence);
if (job->mappings) {
- for (i = 0; i < job->bo_count; i++)
+ for (i = 0; i < job->bo_count; i++) {
+ if (!job->mappings[i])
+ break;
+
+ atomic_dec(&job->mappings[i]->obj->gpu_usecount);
panfrost_gem_mapping_put(job->mappings[i]);
+ }
kvfree(job->mappings);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 5ae67d526b1d..328272ff77d8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -85,7 +85,6 @@ static int sun4i_drv_bind(struct device *dev)
}
drm_mode_config_init(drm);
- drm->mode_config.allow_fb_modifiers = true;
ret = component_bind_all(drm->dev, drm);
if (ret) {
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 5bd60ded3d81..909eba43664a 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -196,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->base, handle);
- drm_gem_object_put_unlocked(&obj->base);
- if (ret)
+ if (ret) {
+ drm_gem_object_put_unlocked(&obj->base);
return ERR_PTR(ret);
+ }
return &obj->base;
}
@@ -221,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_object->size;
args->pitch = pitch;
- DRM_DEBUG("Created object of size %lld\n", size);
+ drm_gem_object_put_unlocked(gem_object);
+
+ DRM_DEBUG("Created object of size %llu\n", args->size);
return 0;
}
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index d704eccc548f..f01a57e5a5f3 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -771,7 +771,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
smmu_pmu->reloc_base = smmu_pmu->reg_base;
}
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0)
smmu_pmu->irq = irq;
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 97acc2ba2912..de844b412110 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -731,6 +731,7 @@ static int qpnpint_irq_domain_translate(struct irq_domain *d,
return 0;
}
+static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class;
static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
struct irq_domain *domain, unsigned int virq,
@@ -746,6 +747,9 @@ static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
else
handler = handle_level_irq;
+
+ irq_set_lockdep_class(virq, &qpnpint_irq_lock_class,
+ &qpnpint_irq_request_class);
irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb,
handler, NULL, NULL);
}
diff --git a/fs/dax.c b/fs/dax.c
index 1f1f0201cad1..35da144375a0 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -937,12 +937,11 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
* on persistent storage prior to completion of the operation.
*/
int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc)
+ struct dax_device *dax_dev, struct writeback_control *wbc)
{
XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
struct inode *inode = mapping->host;
pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
- struct dax_device *dax_dev;
void *entry;
int ret = 0;
unsigned int scanned = 0;
@@ -953,10 +952,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
return 0;
- dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
- if (!dax_dev)
- return -EIO;
-
trace_dax_writeback_range(inode, xas.xa_index, end_index);
tag_pages_for_writeback(mapping, xas.xa_index, end_index);
@@ -977,7 +972,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
xas_lock_irq(&xas);
}
xas_unlock_irq(&xas);
- put_dax(dax_dev);
trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
return ret;
}
@@ -1207,6 +1201,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
lockdep_assert_held(&inode->i_rwsem);
}
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ flags |= IOMAP_NOWAIT;
+
while (iov_iter_count(iter)) {
ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
iter, dax_iomap_actor);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 119667e65890..c885cf7d724b 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -960,8 +960,9 @@ ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
static int
ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
- return dax_writeback_mapping_range(mapping,
- mapping->host->i_sb->s_bdev, wbc);
+ struct ext2_sb_info *sbi = EXT2_SB(mapping->host->i_sb);
+
+ return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
}
const struct address_space_operations ext2_aops = {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3313168b680f..1305b810c44a 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2867,7 +2867,7 @@ static int ext4_dax_writepages(struct address_space *mapping,
percpu_down_read(&sbi->s_journal_flag_rwsem);
trace_ext4_writepages(inode, wbc);
- ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
+ ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
trace_ext4_writepages_result(inode, wbc, ret,
nr_to_write - wbc->nr_to_write);
percpu_up_read(&sbi->s_journal_flag_rwsem);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 3a688eb5c5ae..58e937be24ce 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -587,7 +587,7 @@ xfs_dax_writepages(
xfs_iflags_clear(ip, XFS_ITRUNCATED);
return dax_writeback_mapping_range(mapping,
- xfs_inode_buftarg(ip)->bt_bdev, wbc);
+ xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}
STATIC sector_t
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 9bd8528bd305..328c2dbb4409 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -129,11 +129,6 @@ static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
sectors);
}
-static inline struct dax_device *fs_dax_get_by_host(const char *host)
-{
- return dax_get_by_host(host);
-}
-
static inline void fs_put_dax(struct dax_device *dax_dev)
{
put_dax(dax_dev);
@@ -141,7 +136,7 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc);
+ struct dax_device *dax_dev, struct writeback_control *wbc);
struct page *dax_layout_busy_page(struct address_space *mapping);
dax_entry_t dax_lock_page(struct page *page);
@@ -160,11 +155,6 @@ static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
return false;
}
-static inline struct dax_device *fs_dax_get_by_host(const char *host)
-{
- return NULL;
-}
-
static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
@@ -180,7 +170,7 @@ static inline struct page *dax_layout_busy_page(struct address_space *mapping)
}
static inline int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc)
+ struct dax_device *dax_dev, struct writeback_control *wbc)
{
return -EOPNOTSUPP;
}
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index d5765039652a..ae58fad7f1e0 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -29,7 +29,8 @@ struct pipe_buffer {
/**
* struct pipe_inode_info - a linux kernel pipe
* @mutex: mutex protecting the whole thing
- * @wait: reader/writer wait point in case of empty/full pipe
+ * @rd_wait: reader wait point in case of empty pipe
+ * @wr_wait: writer wait point in case of full pipe
* @head: The point of buffer production
* @tail: The point of buffer consumption
* @max_usage: The maximum number of slots that may be used in the ring
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index af2c85d3a1dd..6c7a10a6d71e 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -440,7 +440,7 @@ struct synth_event_trace_state {
struct synth_event *event;
unsigned int cur_field;
unsigned int n_u64;
- bool enabled;
+ bool disabled;
bool add_next;
bool add_name;
};
diff --git a/init/Kconfig b/init/Kconfig
index cfee56c151f1..452bc1835cd4 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1227,7 +1227,6 @@ endif
config BOOT_CONFIG
bool "Boot config support"
depends on BLK_DEV_INITRD
- select LIBXBC
default y
help
Extra boot config allows system admin to pass a config file as
diff --git a/init/main.c b/init/main.c
index cc0ee4873419..f95b014a5479 100644
--- a/init/main.c
+++ b/init/main.c
@@ -142,6 +142,15 @@ static char *extra_command_line;
/* Extra init arguments */
static char *extra_init_args;
+#ifdef CONFIG_BOOT_CONFIG
+/* Is bootconfig on command line? */
+static bool bootconfig_found;
+static bool initargs_found;
+#else
+# define bootconfig_found false
+# define initargs_found false
+#endif
+
static char *execute_command;
static char *ramdisk_execute_command;
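Editor's note: defining bootconfig_found/initargs_found as the constant false when CONFIG_BOOT_CONFIG is off is a common kernel idiom: every use still type-checks, but the compiler folds the branches away, so no #ifdef is needed at the call sites. In miniature:

#ifdef CONFIG_FEATURE_X
static bool feature_x_found;		/* real state, set while parsing */
#else
# define feature_x_found false		/* constant, dead-code eliminates users */
#endif

static void do_feature_x(void) { }	/* illustrative */

static void frob(void)
{
	if (feature_x_found)		/* compiles in both configurations */
		do_feature_x();
}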
@@ -336,17 +345,30 @@ u32 boot_config_checksum(unsigned char *p, u32 size)
return ret;
}
+static int __init bootconfig_params(char *param, char *val,
+ const char *unused, void *arg)
+{
+ if (strcmp(param, "bootconfig") == 0) {
+ bootconfig_found = true;
+ } else if (strcmp(param, "--") == 0) {
+ initargs_found = true;
+ }
+ return 0;
+}
+
static void __init setup_boot_config(const char *cmdline)
{
+ static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
u32 size, csum;
char *data, *copy;
- const char *p;
u32 *hdr;
int ret;
- p = strstr(cmdline, "bootconfig");
- if (!p || (p != cmdline && !isspace(*(p-1))) ||
- (p[10] && !isspace(p[10])))
+ strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
+ bootconfig_params);
+
+ if (!bootconfig_found)
return;
if (!initrd_end)
@@ -562,11 +584,12 @@ static void __init setup_command_line(char *command_line)
* to init.
*/
len = strlen(saved_command_line);
- if (!strstr(boot_command_line, " -- ")) {
+ if (initargs_found) {
+ saved_command_line[len++] = ' ';
+ } else {
strcpy(saved_command_line + len, " -- ");
len += 4;
- } else
- saved_command_line[len++] = ' ';
+ }
strcpy(saved_command_line + len, extra_init_args);
}
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index db552b9f9377..75f687301bbf 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5927,11 +5927,14 @@ void cgroup_post_fork(struct task_struct *child)
spin_lock_irq(&css_set_lock);
- WARN_ON_ONCE(!list_empty(&child->cg_list));
- cset = task_css_set(current); /* current is @child's parent */
- get_css_set(cset);
- cset->nr_tasks++;
- css_set_move_task(child, NULL, cset, false);
+ /* init tasks are special, only link regular threads */
+ if (likely(child->pid)) {
+ WARN_ON_ONCE(!list_empty(&child->cg_list));
+ cset = task_css_set(current); /* current is @child's parent */
+ get_css_set(cset);
+ cset->nr_tasks++;
+ css_set_move_task(child, NULL, cset, false);
+ }
/*
* If the cgroup has to be frozen, the new task has too. Let's set
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index e7ce7cdac62f..483b3fd1094f 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -1798,6 +1798,60 @@ void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
}
EXPORT_SYMBOL_GPL(synth_event_cmd_init);
+static inline int
+__synth_event_trace_start(struct trace_event_file *file,
+ struct synth_event_trace_state *trace_state)
+{
+ int entry_size, fields_size = 0;
+ int ret = 0;
+
+ /*
+ * Normal event tracing doesn't get called at all unless the
+ * ENABLED bit is set (which attaches the probe thus allowing
+ * this code to be called, etc). Because this is called
+ * directly by the user, we don't have that but we still need
+ * to honor not logging when disabled. For the iterated
+ * trace case, we save the disabled state upon start and just
+ * ignore the following data calls.
+ */
+ if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
+ trace_trigger_soft_disabled(file)) {
+ trace_state->disabled = true;
+ ret = -ENOENT;
+ goto out;
+ }
+
+ trace_state->event = file->event_call->data;
+
+ fields_size = trace_state->event->n_u64 * sizeof(u64);
+
+ /*
+ * Avoid ring buffer recursion detection, as this event
+ * is being performed within another event.
+ */
+ trace_state->buffer = file->tr->array_buffer.buffer;
+ ring_buffer_nest_start(trace_state->buffer);
+
+ entry_size = sizeof(*trace_state->entry) + fields_size;
+ trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
+ file,
+ entry_size);
+ if (!trace_state->entry) {
+ ring_buffer_nest_end(trace_state->buffer);
+ ret = -EINVAL;
+ }
+out:
+ return ret;
+}
+
+static inline void
+__synth_event_trace_end(struct synth_event_trace_state *trace_state)
+{
+ trace_event_buffer_commit(&trace_state->fbuffer);
+
+ ring_buffer_nest_end(trace_state->buffer);
+}
+
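Editor's note: every caller of this pair now follows the same shape, as the rewritten synth_event_trace() and synth_event_trace_array() below show; schematically, with the field-filling elided:

	struct synth_event_trace_state state;
	int ret;

	ret = __synth_event_trace_start(file, &state);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;	/* disabled is not an error */
		return ret;
	}

	/* ... fill state.entry->fields[] from the caller's values ... */

	__synth_event_trace_end(&state);
	return 0;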
/**
* synth_event_trace - Trace a synthetic event
* @file: The trace_event_file representing the synthetic event
@@ -1819,71 +1873,38 @@ EXPORT_SYMBOL_GPL(synth_event_cmd_init);
*/
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
{
- struct trace_event_buffer fbuffer;
- struct synth_trace_event *entry;
- struct trace_buffer *buffer;
- struct synth_event *event;
+ struct synth_event_trace_state state;
unsigned int i, n_u64;
- int fields_size = 0;
va_list args;
- int ret = 0;
-
- /*
- * Normal event generation doesn't get called at all unless
- * the ENABLED bit is set (which attaches the probe thus
- * allowing this code to be called, etc). Because this is
- * called directly by the user, we don't have that but we
- * still need to honor not logging when disabled.
- */
- if (!(file->flags & EVENT_FILE_FL_ENABLED))
- return 0;
-
- event = file->event_call->data;
-
- if (n_vals != event->n_fields)
- return -EINVAL;
-
- if (trace_trigger_soft_disabled(file))
- return -EINVAL;
-
- fields_size = event->n_u64 * sizeof(u64);
-
- /*
- * Avoid ring buffer recursion detection, as this event
- * is being performed within another event.
- */
- buffer = file->tr->array_buffer.buffer;
- ring_buffer_nest_start(buffer);
+ int ret;
- entry = trace_event_buffer_reserve(&fbuffer, file,
- sizeof(*entry) + fields_size);
- if (!entry) {
- ret = -EINVAL;
- goto out;
+ ret = __synth_event_trace_start(file, &state);
+ if (ret) {
+ if (ret == -ENOENT)
+ ret = 0; /* just disabled, not really an error */
+ return ret;
}
va_start(args, n_vals);
- for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+ for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
u64 val;
val = va_arg(args, u64);
- if (event->fields[i]->is_string) {
+ if (state.event->fields[i]->is_string) {
char *str_val = (char *)(long)val;
- char *str_field = (char *)&entry->fields[n_u64];
+ char *str_field = (char *)&state.entry->fields[n_u64];
strscpy(str_field, str_val, STR_VAR_LEN_MAX);
n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
} else {
- entry->fields[n_u64] = val;
+ state.entry->fields[n_u64] = val;
n_u64++;
}
}
va_end(args);
- trace_event_buffer_commit(&fbuffer);
-out:
- ring_buffer_nest_end(buffer);
+ __synth_event_trace_end(&state);
return ret;
}
@@ -1910,64 +1931,31 @@ EXPORT_SYMBOL_GPL(synth_event_trace);
int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
unsigned int n_vals)
{
- struct trace_event_buffer fbuffer;
- struct synth_trace_event *entry;
- struct trace_buffer *buffer;
- struct synth_event *event;
+ struct synth_event_trace_state state;
unsigned int i, n_u64;
- int fields_size = 0;
- int ret = 0;
-
- /*
- * Normal event generation doesn't get called at all unless
- * the ENABLED bit is set (which attaches the probe thus
- * allowing this code to be called, etc). Because this is
- * called directly by the user, we don't have that but we
- * still need to honor not logging when disabled.
- */
- if (!(file->flags & EVENT_FILE_FL_ENABLED))
- return 0;
-
- event = file->event_call->data;
-
- if (n_vals != event->n_fields)
- return -EINVAL;
-
- if (trace_trigger_soft_disabled(file))
- return -EINVAL;
-
- fields_size = event->n_u64 * sizeof(u64);
-
- /*
- * Avoid ring buffer recursion detection, as this event
- * is being performed within another event.
- */
- buffer = file->tr->array_buffer.buffer;
- ring_buffer_nest_start(buffer);
+ int ret;
- entry = trace_event_buffer_reserve(&fbuffer, file,
- sizeof(*entry) + fields_size);
- if (!entry) {
- ret = -EINVAL;
- goto out;
+ ret = __synth_event_trace_start(file, &state);
+ if (ret) {
+ if (ret == -ENOENT)
+ ret = 0; /* just disabled, not really an error */
+ return ret;
}
- for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
- if (event->fields[i]->is_string) {
+ for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
+ if (state.event->fields[i]->is_string) {
char *str_val = (char *)(long)vals[i];
- char *str_field = (char *)&entry->fields[n_u64];
+ char *str_field = (char *)&state.entry->fields[n_u64];
strscpy(str_field, str_val, STR_VAR_LEN_MAX);
n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
} else {
- entry->fields[n_u64] = vals[i];
+ state.entry->fields[n_u64] = vals[i];
n_u64++;
}
}
- trace_event_buffer_commit(&fbuffer);
-out:
- ring_buffer_nest_end(buffer);
+ __synth_event_trace_end(&state);
return ret;
}
@@ -2004,58 +1992,17 @@ EXPORT_SYMBOL_GPL(synth_event_trace_array);
int synth_event_trace_start(struct trace_event_file *file,
struct synth_event_trace_state *trace_state)
{
- struct synth_trace_event *entry;
- int fields_size = 0;
- int ret = 0;
+ int ret;
- if (!trace_state) {
- ret = -EINVAL;
- goto out;
- }
+ if (!trace_state)
+ return -EINVAL;
memset(trace_state, '\0', sizeof(*trace_state));
- /*
- * Normal event tracing doesn't get called at all unless the
- * ENABLED bit is set (which attaches the probe thus allowing
- * this code to be called, etc). Because this is called
- * directly by the user, we don't have that but we still need
- * to honor not logging when disabled. For the the iterated
- * trace case, we save the enabed state upon start and just
- * ignore the following data calls.
- */
- if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
- trace_state->enabled = false;
- goto out;
- }
-
- trace_state->enabled = true;
-
- trace_state->event = file->event_call->data;
-
- if (trace_trigger_soft_disabled(file)) {
- ret = -EINVAL;
- goto out;
- }
+ ret = __synth_event_trace_start(file, trace_state);
+ if (ret == -ENOENT)
+ ret = 0; /* just disabled, not really an error */
- fields_size = trace_state->event->n_u64 * sizeof(u64);
-
- /*
- * Avoid ring buffer recursion detection, as this event
- * is being performed within another event.
- */
- trace_state->buffer = file->tr->array_buffer.buffer;
- ring_buffer_nest_start(trace_state->buffer);
-
- entry = trace_event_buffer_reserve(&trace_state->fbuffer, file,
- sizeof(*entry) + fields_size);
- if (!entry) {
- ret = -EINVAL;
- goto out;
- }
-
- trace_state->entry = entry;
-out:
return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace_start);
@@ -2088,7 +2035,7 @@ static int __synth_event_add_val(const char *field_name, u64 val,
trace_state->add_next = true;
}
- if (!trace_state->enabled)
+ if (trace_state->disabled)
goto out;
event = trace_state->event;
@@ -2223,9 +2170,7 @@ int synth_event_trace_end(struct synth_event_trace_state *trace_state)
if (!trace_state)
return -EINVAL;
- trace_event_buffer_commit(&trace_state->fbuffer);
-
- ring_buffer_nest_end(trace_state->buffer);
+ __synth_event_trace_end(trace_state);
return 0;
}
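
For context, a minimal sketch of how a caller drives the iterated interface
built on the helpers consolidated above. synth_event_trace_start(),
synth_event_add_next_val() and synth_event_trace_end() are the exported entry
points touched by this patch; the wrapper function and its single u64 field
value are hypothetical:

static int example_synth_trace(struct trace_event_file *file, u64 val)
{
	struct synth_event_trace_state state;
	int ret;

	ret = synth_event_trace_start(file, &state);	/* 0 even if disabled */
	if (ret)
		return ret;

	/* Fill the event's fields in definition order. */
	ret = synth_event_add_next_val(val, &state);

	/* Per the kernel-doc, end must be called once start has succeeded,
	 * whether or not adding values failed, to unwind the nesting. */
	synth_event_trace_end(&state);

	return ret;
}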
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d8264ebb9581..362cca52f5de 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1012,7 +1012,7 @@ int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
{
struct dynevent_arg arg;
va_list args;
- int ret;
+ int ret = 0;
if (cmd->type != DYNEVENT_TYPE_KPROBE)
return -EINVAL;
diff --git a/lib/Kconfig b/lib/Kconfig
index 0cf875fd627c..bc7e56370129 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -573,9 +573,6 @@ config DIMLIB
config LIBFDT
bool
-config LIBXBC
- bool
-
config OID_REGISTRY
tristate
help
diff --git a/lib/Makefile b/lib/Makefile
index 5d64890d6b6a..611872c06926 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -230,7 +230,7 @@ $(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
-lib-$(CONFIG_LIBXBC) += bootconfig.o
+lib-$(CONFIG_BOOT_CONFIG) += bootconfig.o
obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index afb2e767e6fe..3ea601a2eba5 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -6,12 +6,13 @@
#define pr_fmt(fmt) "bootconfig: " fmt
+#include <linux/bootconfig.h>
#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/printk.h>
-#include <linux/bootconfig.h>
#include <linux/string.h>
/*
@@ -23,7 +24,7 @@
* node (for array).
*/
-static struct xbc_node xbc_nodes[XBC_NODE_MAX] __initdata;
+static struct xbc_node *xbc_nodes __initdata;
static int xbc_node_num __initdata;
static char *xbc_data __initdata;
static size_t xbc_data_size __initdata;
@@ -719,7 +720,8 @@ void __init xbc_destroy_all(void)
xbc_data = NULL;
xbc_data_size = 0;
xbc_node_num = 0;
- memset(xbc_nodes, 0, sizeof(xbc_nodes));
+ memblock_free(__pa(xbc_nodes), sizeof(struct xbc_node) * XBC_NODE_MAX);
+ xbc_nodes = NULL;
}
/**
@@ -748,6 +750,13 @@ int __init xbc_init(char *buf)
return -ERANGE;
}
+ xbc_nodes = memblock_alloc(sizeof(struct xbc_node) * XBC_NODE_MAX,
+ SMP_CACHE_BYTES);
+ if (!xbc_nodes) {
+ pr_err("Failed to allocate memory for bootconfig nodes.\n");
+ return -ENOMEM;
+ }
+ memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
xbc_data = buf;
xbc_data_size = ret + 1;
last_parent = NULL;
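
The allocation pattern above is the usual one for early boot: memblock_alloc()
hands out memory before the page allocator exists, and memblock_free() takes a
physical address, hence the __pa() conversion in xbc_destroy_all(). A minimal
sketch of the pairing, with names mirroring the patch:

	nodes = memblock_alloc(sizeof(struct xbc_node) * XBC_NODE_MAX,
			       SMP_CACHE_BYTES);
	if (!nodes)
		return -ENOMEM;
	/* ... early-init use ... */
	memblock_free(__pa(nodes), sizeof(struct xbc_node) * XBC_NODE_MAX);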
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index a566d8201b56..0133dfaaf352 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -210,7 +210,7 @@ static struct sym_entry *read_symbol(FILE *in)
len = strlen(name) + 1;
- sym = malloc(sizeof(*sym) + len);
+ sym = malloc(sizeof(*sym) + len + 1);
if (!sym) {
fprintf(stderr, "kallsyms failure: "
"unable to allocate required amount of memory\n");
@@ -219,7 +219,7 @@ static struct sym_entry *read_symbol(FILE *in)
sym->addr = addr;
sym->len = len;
sym->sym[0] = type;
- memcpy(sym_name(sym), name, len);
+ strcpy(sym_name(sym), name);
sym->percpu_absolute = 0;
return sym;
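
The extra byte matters because of the entry layout in scripts/kallsyms.c:
sym->sym[0] holds the type character and sym_name() points one byte past it,
so the name is written starting at offset 1:

	sym->sym[0]          type character
	sym->sym[1 .. len]   name bytes, including the trailing NUL

With len defined as strlen(name) + 1, the buffer therefore needs
sizeof(*sym) + len + 1 bytes, and once the NUL is guaranteed to fit, strcpy()
is the natural replacement for the memcpy().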
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 1919c311c149..dd484e92752e 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -239,7 +239,7 @@ else
fi;
# final build of init/
-${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
+${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init need-builtin=1
#link vmlinux.o
info LD vmlinux.o
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4b6991e178d3..1659b59fb5d7 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -698,7 +698,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
if (!strcmp(sb->s_type->name, "debugfs") ||
!strcmp(sb->s_type->name, "tracefs") ||
- !strcmp(sb->s_type->name, "binderfs") ||
+ !strcmp(sb->s_type->name, "binder") ||
!strcmp(sb->s_type->name, "pstore"))
sbsec->flags |= SE_SBGENFS;
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index a308ce1e6a13..f511ffccb131 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -518,19 +518,13 @@ void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
const char *str, u32 str_len)
{
struct sidtab_str_cache *cache, *victim = NULL;
+ unsigned long flags;
/* do not cache invalid contexts */
if (entry->context.len)
return;
- /*
- * Skip the put operation when in non-task context to avoid the need
- * to disable interrupts while holding s->cache_lock.
- */
- if (!in_task())
- return;
-
- spin_lock(&s->cache_lock);
+ spin_lock_irqsave(&s->cache_lock, flags);
cache = rcu_dereference_protected(entry->cache,
lockdep_is_held(&s->cache_lock));
@@ -561,7 +555,7 @@ void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
rcu_assign_pointer(entry->cache, cache);
out_unlock:
- spin_unlock(&s->cache_lock);
+ spin_unlock_irqrestore(&s->cache_lock, flags);
kfree_rcu(victim, rcu_member);
}
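
Switching to the irqsave variant is the standard way to make a spinlock safe
to take from any context: it saves the caller's interrupt state, disables
local interrupts, and restores exactly what was saved on unlock, which is why
the !in_task() early return above can be dropped. A minimal, self-contained
sketch of the idiom (names are illustrative):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_count;

/* Safe from process, softirq and hardirq context alike. */
static void example_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	example_count++;
	spin_unlock_irqrestore(&example_lock, flags);
}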
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 336406bcb59e..d5443eeb8b63 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2594,7 +2594,8 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
snd_pcm_drop(substream);
if (substream->hw_opened) {
- do_hw_free(substream);
+ if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
+ do_hw_free(substream);
substream->ops->close(substream);
substream->hw_opened = 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4770fb3f51fb..6c8cb4ce517e 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2447,6 +2447,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
@@ -5701,8 +5702,11 @@ static void alc_fixup_headset_jack(struct hda_codec *codec,
break;
case HDA_FIXUP_ACT_INIT:
switch (codec->core.vendor_id) {
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
alc_write_coef_idx(codec, 0x48, 0xd011);
alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 018b1ecb5404..a48313dfa967 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -151,8 +151,34 @@ static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_i
return ret;
}
+/*
+ * Assume the clock is valid if the clock source supports only a single
+ * sample rate, the terminal is connected directly to it (there is no
+ * clock selector) and the clock type is internal. This is to deal with
+ * some Denon DJ controllers that always report that the clock is invalid.
+ */
+static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip,
+ struct audioformat *fmt,
+ int source_id)
+{
+ if (fmt->protocol == UAC_VERSION_2) {
+ struct uac_clock_source_descriptor *cs_desc =
+ snd_usb_find_clock_source(chip->ctrl_intf, source_id);
+
+ if (!cs_desc)
+ return false;
+
+ return (fmt->nr_rates == 1 &&
+ (fmt->clock & 0xff) == cs_desc->bClockID &&
+ (cs_desc->bmAttributes & 0x3) !=
+ UAC_CLOCK_SOURCE_TYPE_EXT);
+ }
+
+ return false;
+}
+
static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
- int protocol,
+ struct audioformat *fmt,
int source_id)
{
int err;
@@ -160,7 +186,7 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
struct usb_device *dev = chip->dev;
u32 bmControls;
- if (protocol == UAC_VERSION_3) {
+ if (fmt->protocol == UAC_VERSION_3) {
struct uac3_clock_source_descriptor *cs_desc =
snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id);
@@ -194,10 +220,14 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
return false;
}
- return data ? true : false;
+ if (data)
+ return true;
+ else
+ return uac_clock_source_is_valid_quirk(chip, fmt, source_id);
}
-static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
+static int __uac_clock_find_source(struct snd_usb_audio *chip,
+ struct audioformat *fmt, int entity_id,
unsigned long *visited, bool validate)
{
struct uac_clock_source_descriptor *source;
@@ -217,7 +247,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
source = snd_usb_find_clock_source(chip->ctrl_intf, entity_id);
if (source) {
entity_id = source->bClockID;
- if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_2,
+ if (validate && !uac_clock_source_is_valid(chip, fmt,
entity_id)) {
usb_audio_err(chip,
"clock source %d is not valid, cannot use\n",
@@ -248,8 +278,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
}
cur = ret;
- ret = __uac_clock_find_source(chip, selector->baCSourceID[ret - 1],
- visited, validate);
+ ret = __uac_clock_find_source(chip, fmt,
+ selector->baCSourceID[ret - 1],
+ visited, validate);
if (!validate || ret > 0 || !chip->autoclock)
return ret;
@@ -260,8 +291,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
if (i == cur)
continue;
- ret = __uac_clock_find_source(chip, selector->baCSourceID[i - 1],
- visited, true);
+ ret = __uac_clock_find_source(chip, fmt,
+ selector->baCSourceID[i - 1],
+ visited, true);
if (ret < 0)
continue;
@@ -281,14 +313,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
/* FIXME: multipliers only act as pass-thru element for now */
multiplier = snd_usb_find_clock_multiplier(chip->ctrl_intf, entity_id);
if (multiplier)
- return __uac_clock_find_source(chip, multiplier->bCSourceID,
- visited, validate);
+ return __uac_clock_find_source(chip, fmt,
+ multiplier->bCSourceID,
+ visited, validate);
return -EINVAL;
}
-static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
- unsigned long *visited, bool validate)
+static int __uac3_clock_find_source(struct snd_usb_audio *chip,
+ struct audioformat *fmt, int entity_id,
+ unsigned long *visited, bool validate)
{
struct uac3_clock_source_descriptor *source;
struct uac3_clock_selector_descriptor *selector;
@@ -307,7 +341,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
source = snd_usb_find_clock_source_v3(chip->ctrl_intf, entity_id);
if (source) {
entity_id = source->bClockID;
- if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_3,
+ if (validate && !uac_clock_source_is_valid(chip, fmt,
entity_id)) {
usb_audio_err(chip,
"clock source %d is not valid, cannot use\n",
@@ -338,7 +372,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
}
cur = ret;
- ret = __uac3_clock_find_source(chip, selector->baCSourceID[ret - 1],
+ ret = __uac3_clock_find_source(chip, fmt,
+ selector->baCSourceID[ret - 1],
visited, validate);
if (!validate || ret > 0 || !chip->autoclock)
return ret;
@@ -350,8 +385,9 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
if (i == cur)
continue;
- ret = __uac3_clock_find_source(chip, selector->baCSourceID[i - 1],
- visited, true);
+ ret = __uac3_clock_find_source(chip, fmt,
+ selector->baCSourceID[i - 1],
+ visited, true);
if (ret < 0)
continue;
@@ -372,7 +408,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
multiplier = snd_usb_find_clock_multiplier_v3(chip->ctrl_intf,
entity_id);
if (multiplier)
- return __uac3_clock_find_source(chip, multiplier->bCSourceID,
+ return __uac3_clock_find_source(chip, fmt,
+ multiplier->bCSourceID,
visited, validate);
return -EINVAL;
@@ -389,18 +426,18 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
*
* Returns the clock source UnitID (>=0) on success, or an error.
*/
-int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
- int entity_id, bool validate)
+int snd_usb_clock_find_source(struct snd_usb_audio *chip,
+ struct audioformat *fmt, bool validate)
{
DECLARE_BITMAP(visited, 256);
memset(visited, 0, sizeof(visited));
- switch (protocol) {
+ switch (fmt->protocol) {
case UAC_VERSION_2:
- return __uac_clock_find_source(chip, entity_id, visited,
+ return __uac_clock_find_source(chip, fmt, fmt->clock, visited,
validate);
case UAC_VERSION_3:
- return __uac3_clock_find_source(chip, entity_id, visited,
+ return __uac3_clock_find_source(chip, fmt, fmt->clock, visited,
validate);
default:
return -EINVAL;
@@ -501,8 +538,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
* automatic clock selection if the current clock is not
* valid.
*/
- clock = snd_usb_clock_find_source(chip, fmt->protocol,
- fmt->clock, true);
+ clock = snd_usb_clock_find_source(chip, fmt, true);
if (clock < 0) {
/* We did not find a valid clock, but that might be
* because the current sample rate does not match an
@@ -510,8 +546,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
* and we will do another validation after setting the
* rate.
*/
- clock = snd_usb_clock_find_source(chip, fmt->protocol,
- fmt->clock, false);
+ clock = snd_usb_clock_find_source(chip, fmt, false);
if (clock < 0)
return clock;
}
@@ -577,7 +612,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
validation:
/* validate clock after rate change */
- if (!uac_clock_source_is_valid(chip, fmt->protocol, clock))
+ if (!uac_clock_source_is_valid(chip, fmt, clock))
return -ENXIO;
return 0;
}
diff --git a/sound/usb/clock.h b/sound/usb/clock.h
index 076e31b79ee0..68df0fbe09d0 100644
--- a/sound/usb/clock.h
+++ b/sound/usb/clock.h
@@ -6,7 +6,7 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
struct usb_host_interface *alts,
struct audioformat *fmt, int rate);
-int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
- int entity_id, bool validate);
+int snd_usb_clock_find_source(struct snd_usb_audio *chip,
+ struct audioformat *fmt, bool validate);
#endif /* __USBAUDIO_CLOCK_H */
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 9260136e4c9b..9f5cb4ed3a0c 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -151,6 +151,19 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
return pcm_formats;
}
+static int set_fixed_rate(struct audioformat *fp, int rate, int rate_bits)
+{
+ kfree(fp->rate_table);
+ fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
+ if (!fp->rate_table)
+ return -ENOMEM;
+ fp->nr_rates = 1;
+ fp->rate_min = rate;
+ fp->rate_max = rate;
+ fp->rates = rate_bits;
+ fp->rate_table[0] = rate;
+ return 0;
+}
/*
* parse the format descriptor and stores the possible sample rates
@@ -223,6 +236,14 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
fp->rate_min = combine_triple(&fmt[offset + 1]);
fp->rate_max = combine_triple(&fmt[offset + 4]);
}
+
+ /* Jabra Evolve 65 headset */
+ if (chip->usb_id == USB_ID(0x0b0e, 0x030b)) {
+ /* only 48kHz for playback while keeping 16kHz for capture */
+ if (fp->nr_rates != 1)
+ return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
+ }
+
return 0;
}
@@ -299,17 +320,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
- /* supported rates: 48Khz */
- kfree(fp->rate_table);
- fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
- if (!fp->rate_table)
- return -ENOMEM;
- fp->nr_rates = 1;
- fp->rate_min = 48000;
- fp->rate_max = 48000;
- fp->rates = SNDRV_PCM_RATE_48000;
- fp->rate_table[0] = 48000;
- return 0;
+ return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
}
return -ENODEV;
@@ -325,8 +336,7 @@ static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip,
struct usb_device *dev = chip->dev;
unsigned char tmp[2], *data;
int nr_triplets, data_size, ret = 0, ret_l6;
- int clock = snd_usb_clock_find_source(chip, fp->protocol,
- fp->clock, false);
+ int clock = snd_usb_clock_find_source(chip, fp, false);
if (clock < 0) {
dev_err(&dev->dev,
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d659fdb475e2..81b2db0edd5f 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -897,6 +897,15 @@ static int parse_term_proc_unit(struct mixer_build *state,
return 0;
}
+static int parse_term_effect_unit(struct mixer_build *state,
+ struct usb_audio_term *term,
+ void *p1, int id)
+{
+ term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */
+ term->id = id;
+ return 0;
+}
+
static int parse_term_uac2_clock_source(struct mixer_build *state,
struct usb_audio_term *term,
void *p1, int id)
@@ -981,8 +990,7 @@ static int __check_input_term(struct mixer_build *state, int id,
UAC3_PROCESSING_UNIT);
case PTYPE(UAC_VERSION_2, UAC2_EFFECT_UNIT):
case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT):
- return parse_term_proc_unit(state, term, p1, id,
- UAC3_EFFECT_UNIT);
+ return parse_term_effect_unit(state, term, p1, id);
case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT):
case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2):
case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT):
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 3a5242e383b2..7f558f4b4520 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1440,6 +1440,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
+ case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
return true;
}
diff --git a/tools/bootconfig/include/linux/memblock.h b/tools/bootconfig/include/linux/memblock.h
new file mode 100644
index 000000000000..7862f217d85d
--- /dev/null
+++ b/tools/bootconfig/include/linux/memblock.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _XBC_LINUX_MEMBLOCK_H
+#define _XBC_LINUX_MEMBLOCK_H
+
+#include <stdlib.h>
+
+#define __pa(addr) (addr)
+#define SMP_CACHE_BYTES 0
+#define memblock_alloc(size, align) malloc(size)
+#define memblock_free(paddr, size) free(paddr)
+
+#endif
diff --git a/tools/bootconfig/include/linux/printk.h b/tools/bootconfig/include/linux/printk.h
index 017bcd6912a5..e978a63d3222 100644
--- a/tools/bootconfig/include/linux/printk.h
+++ b/tools/bootconfig/include/linux/printk.h
@@ -7,7 +7,7 @@
/* controllable printf */
extern int pr_output;
#define printk(fmt, ...) \
- (pr_output ? printf(fmt, __VA_ARGS__) : 0)
+ (pr_output ? printf(fmt, ##__VA_ARGS__) : 0)
#define pr_err printk
#define pr_warn printk
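
The ## matters because a plain __VA_ARGS__ leaves a dangling comma when the
macro is invoked with a format string alone; the GNU/Clang ## extension
deletes that comma. A standalone userspace sketch of the failure mode (not
from this patch):

#include <stdio.h>

#define BAD_PR(fmt, ...)  printf(fmt, __VA_ARGS__)   /* breaks with no args */
#define GOOD_PR(fmt, ...) printf(fmt, ##__VA_ARGS__) /* comma elided */

int main(void)
{
	GOOD_PR("no arguments\n");		/* ok: trailing comma removed */
	GOOD_PR("one argument: %d\n", 1);
	/* BAD_PR("no arguments\n"); would expand to printf(fmt, ) */
	return 0;
}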
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
index 47f488458328..e18eeb070562 100644
--- a/tools/bootconfig/main.c
+++ b/tools/bootconfig/main.c
@@ -140,7 +140,7 @@ int load_xbc_from_initrd(int fd, char **buf)
return 0;
if (lseek(fd, -8, SEEK_END) < 0) {
- printf("Failed to lseek: %d\n", -errno);
+ pr_err("Failed to lseek: %d\n", -errno);
return -errno;
}
@@ -155,7 +155,7 @@ int load_xbc_from_initrd(int fd, char **buf)
return 0;
if (lseek(fd, stat.st_size - 8 - size, SEEK_SET) < 0) {
- printf("Failed to lseek: %d\n", -errno);
+ pr_err("Failed to lseek: %d\n", -errno);
return -errno;
}
@@ -166,7 +166,7 @@ int load_xbc_from_initrd(int fd, char **buf)
/* Wrong Checksum, maybe no boot config here */
rcsum = checksum((unsigned char *)*buf, size);
if (csum != rcsum) {
- printf("checksum error: %d != %d\n", csum, rcsum);
+ pr_err("checksum error: %d != %d\n", csum, rcsum);
return 0;
}
@@ -185,13 +185,13 @@ int show_xbc(const char *path)
fd = open(path, O_RDONLY);
if (fd < 0) {
- printf("Failed to open initrd %s: %d\n", path, fd);
+ pr_err("Failed to open initrd %s: %d\n", path, fd);
return -errno;
}
ret = load_xbc_from_initrd(fd, &buf);
if (ret < 0)
- printf("Failed to load a boot config from initrd: %d\n", ret);
+ pr_err("Failed to load a boot config from initrd: %d\n", ret);
else
xbc_show_compact_tree();
@@ -209,7 +209,7 @@ int delete_xbc(const char *path)
fd = open(path, O_RDWR);
if (fd < 0) {
- printf("Failed to open initrd %s: %d\n", path, fd);
+ pr_err("Failed to open initrd %s: %d\n", path, fd);
return -errno;
}
@@ -222,7 +222,7 @@ int delete_xbc(const char *path)
pr_output = 1;
if (size < 0) {
ret = size;
- printf("Failed to load a boot config from initrd: %d\n", ret);
+ pr_err("Failed to load a boot config from initrd: %d\n", ret);
} else if (size > 0) {
ret = fstat(fd, &stat);
if (!ret)
@@ -245,7 +245,7 @@ int apply_xbc(const char *path, const char *xbc_path)
ret = load_xbc_file(xbc_path, &buf);
if (ret < 0) {
- printf("Failed to load %s : %d\n", xbc_path, ret);
+ pr_err("Failed to load %s : %d\n", xbc_path, ret);
return ret;
}
size = strlen(buf) + 1;
@@ -262,7 +262,7 @@ int apply_xbc(const char *path, const char *xbc_path)
/* Check the data format */
ret = xbc_init(buf);
if (ret < 0) {
- printf("Failed to parse %s: %d\n", xbc_path, ret);
+ pr_err("Failed to parse %s: %d\n", xbc_path, ret);
free(data);
free(buf);
return ret;
@@ -279,20 +279,20 @@ int apply_xbc(const char *path, const char *xbc_path)
/* Remove old boot config if exists */
ret = delete_xbc(path);
if (ret < 0) {
- printf("Failed to delete previous boot config: %d\n", ret);
+ pr_err("Failed to delete previous boot config: %d\n", ret);
return ret;
}
/* Apply new one */
fd = open(path, O_RDWR | O_APPEND);
if (fd < 0) {
- printf("Failed to open %s: %d\n", path, fd);
+ pr_err("Failed to open %s: %d\n", path, fd);
return fd;
}
/* TODO: Ensure the @path is initramfs/initrd image */
ret = write(fd, data, size + 8);
if (ret < 0) {
- printf("Failed to apply a boot config: %d\n", ret);
+ pr_err("Failed to apply a boot config: %d\n", ret);
return ret;
}
close(fd);
@@ -334,12 +334,12 @@ int main(int argc, char **argv)
}
if (apply && delete) {
- printf("Error: You can not specify both -a and -d at once.\n");
+ pr_err("Error: You can not specify both -a and -d at once.\n");
return usage();
}
if (optind >= argc) {
- printf("Error: No initrd is specified.\n");
+ pr_err("Error: No initrd is specified.\n");
return usage();
}
diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
index 87725e8723f8..1de06de328e2 100755
--- a/tools/bootconfig/test-bootconfig.sh
+++ b/tools/bootconfig/test-bootconfig.sh
@@ -64,6 +64,15 @@ echo "File size check"
new_size=$(stat -c %s $INITRD)
xpass test $new_size -eq $initrd_size
+echo "No error messge while applying"
+OUTFILE=`mktemp tempout-XXXX`
+dd if=/dev/zero of=$INITRD bs=4096 count=1
+printf " \0\0\0 \0\0\0" >> $INITRD
+$BOOTCONF -a $TEMPCONF $INITRD > $OUTFILE 2>&1
+xfail grep -i "failed" $OUTFILE
+xfail grep -i "error" $OUTFILE
+rm $OUTFILE
+
echo "Max node number check"
echo -n > $TEMPCONF