From 9f591ae60e1be026901398ef99eede91237aa3a1 Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Wed, 21 Mar 2018 15:08:47 +0100 Subject: drm/i915/gvt: throw error on unhandled vfio ioctls On unknown/unhandled ioctls the driver should return an error, so userspace knows it tried to use something unsupported. Cc: stable@vger.kernel.org Signed-off-by: Gerd Hoffmann Reviewed-by: Alex Williamson Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 520fe3d0a882..f8540cc67b44 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1254,7 +1254,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, } - return 0; + return -ENOTTY; } static ssize_t -- cgit From ac0fd9cfc837d1d30cb958dc34769e90aad43078 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 22 Mar 2018 12:27:54 -0500 Subject: drm/i915/gvt: Mark expected switch fall-through in handle_g2v_notification In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 1466154 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 8c5d5d005854..a33c1c3e4a21 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1150,6 +1150,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) switch (notification) { case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; + /* fall through */ case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); return PTR_ERR_OR_ZERO(mm); -- cgit From 5da795b061a139b55d7393e79c7af688c58a4267 Mon Sep 17 00:00:00 2001 From: Zhipeng Gong Date: Mon, 26 Mar 2018 15:18:56 +0800 Subject: drm/i915/gvt: Make MI_USER_INTERRUPT nop in cmd parser GVT-g dispatches request to host i915 and depends on i915 notify ring interrupt mechanism to check completion of request. For now MI_USER_INTERRUPT in guest requests is passed through in GVT-g cmd parser and i915 does not use it, which causes unnecessary interrupt handling in i915. On the other hand, if several requests from guest are combined into one request in and contain MI_USER_INTERRUPT in the middle of combined request. GVT-g still has to wait on the whole request to complete to inject user interrupts to guest. This patch makes all the MI_USER_INTERRUPT nop to save some interrupt handling. Here is test result to run glmark2 on guest for 10 seconds: host master interrupts number is reduced from 16021 to 11162 host user interrupts number is reduced from 7936 to 3536 v2: - revise commit message. 
(Kevin) Reviewed-by: Kevin Tian Signed-off-by: Zhipeng Gong Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index c8454ac43fae..d301a204e981 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1079,6 +1079,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s) { set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt, s->workload->pending_events); + patch_value(s, cmd_ptr(s, 0), MI_NOOP); return 0; } -- cgit From 7598e8700e9a507496660219924ca8c5aa3088d6 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Tue, 27 Mar 2018 15:35:14 +0800 Subject: drm/i915/gvt: Missed to cancel dma map for ggtt entries We have canceled dma map for ppgtt entries. Also we need to do it for ggtt entries when them are invalidated. This can fix task hung issue as: [13517.791767] INFO: task gvt_service_thr:1081 blocked for more than 120 seconds. [13517.792584] Not tainted 4.14.15+ #3 [13517.793417] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [13517.794267] gvt_service_thr D 0 1081 2 0x80000000 [13517.795132] Call Trace: [13517.795996] ? __schedule+0x493/0x77b [13517.796859] schedule+0x79/0x82 [13517.797740] schedule_preempt_disabled+0x5/0x6 [13517.798614] __mutex_lock.isra.0+0x2b5/0x445 [13517.799504] ? __switch_to_asm+0x24/0x60 [13517.800381] ? intel_gvt_cleanup+0x10/0x10 [13517.801261] ? intel_gvt_schedule+0x19/0x2b9 [13517.802107] intel_gvt_schedule+0x19/0x2b9 [13517.802954] ? intel_gvt_cleanup+0x10/0x10 [13517.803824] gvt_service_thread+0xe3/0x10d [13517.804704] ? wait_woken+0x68/0x68 [13517.805588] kthread+0x118/0x120 [13517.806478] ? kthread_create_on_node+0x3a/0x3a [13517.807381] ? call_usermodehelper_exec_async+0x113/0x11a [13517.808307] ret_from_fork+0x35/0x40 v3: split out ggtt reset case. v2: also unmap ggtt during reset. 
Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index d29281231507..8c9477c272be 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -530,6 +530,16 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, false, 0, mm->vgpu); } +static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, + struct intel_gvt_gtt_entry *entry, unsigned long index) +{ + struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + + GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); + + pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu); +} + static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { @@ -1818,6 +1828,18 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, return ret; } +static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, + struct intel_gvt_gtt_entry *entry) +{ + struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; + unsigned long pfn; + + pfn = pte_ops->get_pfn(entry); + if (pfn != vgpu->gvt->gtt.scratch_mfn) + intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, + pfn << PAGE_SHIFT); +} + static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { @@ -1844,10 +1866,10 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, bytes); - m = e; if (ops->test_present(&e)) { gfn = ops->get_pfn(&e); + m = e; /* one PTE update may be issued in multiple writes and the * first write may not construct a valid gfn @@ -1868,8 +1890,12 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, ops->set_pfn(&m, gvt->gtt.scratch_mfn); } else ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); - } else + } else { + ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); + ggtt_invalidate_pte(vgpu, &m); ops->set_pfn(&m, gvt->gtt.scratch_mfn); + ops->clear_present(&m); + } out: ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); -- cgit From f4c43db356198d8091442f593871b0beb5c139b2 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Tue, 27 Mar 2018 15:35:15 +0800 Subject: drm/i915/gvt: Cancel dma map when resetting ggtt entries Ditto, don't forget ggtt entries during reset. Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 22 +++++++++++++++++----- drivers/gpu/drm/i915/gvt/gtt.h | 2 +- 2 files changed, 18 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 8c9477c272be..78e55aafc8bc 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2056,7 +2056,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) return PTR_ERR(gtt->ggtt_mm); } - intel_vgpu_reset_ggtt(vgpu); + intel_vgpu_reset_ggtt(vgpu, false); return create_scratch_page_tree(vgpu); } @@ -2341,17 +2341,19 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) /** * intel_vgpu_reset_ggtt - reset the GGTT entry * @vgpu: a vGPU + * @invalidate_old: invalidate old entries * * This function is called at the vGPU create stage * to reset all the GGTT entries. 
* */ -void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) +void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; + struct intel_gvt_gtt_entry old_entry; u32 index; u32 num_entries; @@ -2360,13 +2362,23 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; - while (num_entries--) + while (num_entries--) { + if (invalidate_old) { + ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); + ggtt_invalidate_pte(vgpu, &old_entry); + } ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); + } index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; - while (num_entries--) + while (num_entries--) { + if (invalidate_old) { + ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); + ggtt_invalidate_pte(vgpu, &old_entry); + } ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); + } ggtt_invalidate(dev_priv); } @@ -2386,5 +2398,5 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) * removing the shadow pages. */ intel_vgpu_destroy_all_ppgtt_mm(vgpu); - intel_vgpu_reset_ggtt(vgpu); + intel_vgpu_reset_ggtt(vgpu, true); } diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index a8b369cd352b..3792f2b7f4ff 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -193,7 +193,7 @@ struct intel_vgpu_gtt { extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); -void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); +void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old); void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); extern int intel_gvt_init_gtt(struct intel_gvt *gvt); -- cgit From a733390f9a79876635013e57da25f91b6e78ffe6 Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Wed, 28 Mar 2018 05:30:13 +0800 Subject: drm/i915/gvt: Delete redundant error message in fb_decode.c Much error message exist in host dmesg when guest boot up with local display enabled. [ 167.680011] gvt: vgpu 1: invalid range gmadr 0x0 size 0x0 [ 167.680013] gvt: vgpu 1: invalid gma address: 0 The second error line duplicate with the first error line, so this patch remove this redundant error message and make the next error message much clearer. 
Signed-off-by: Xiong Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/fb_decoder.c | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 6b50fe78dc1b..1c120683e958 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -245,16 +245,13 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->hw_format = fmt; plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) return -EINVAL; - } plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n", + plane->base); return -EINVAL; } @@ -371,16 +368,13 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, alpha_plane, alpha_force); plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) return -EINVAL; - } plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n", + plane->base); return -EINVAL; } @@ -476,16 +470,13 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->drm_format = drm_format; plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) return -EINVAL; - } plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n", + plane->base); return -EINVAL; } -- cgit From 65eff272330c72689fb5e20dd6491826fd87a39c Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Wed, 28 Mar 2018 05:30:14 +0800 Subject: drm/i915/gvt: Disable primary/sprite/cursor plane at virtual display initialization Much error exist in host dmesg during guest boot up with loca display enabled. gvt: vgpu 1: invalid range gmadr 0x0 size 0x0 This error happens when qemu get dmabuf info in case that the virtual display plane is enabled but its base address is an invalid 0, such case may be true before guest enable its plane. At this moment, its state is copied from host where the plane may be enabled. This patch disable primary/sprite/cursor plane at virtual display initialization, so intel_vgpu_decode_primary/cursor/sprite could return early as plane is disabled, then plane base check is skipped and error message disapper. 
Signed-off-by: Xiong Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/display.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index dd96ffc878ac..6d8180e8d1e2 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -169,6 +169,8 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { static void emulate_monitor_status_change(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + int pipe; + vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); @@ -267,6 +269,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) if (IS_BROADWELL(dev_priv)) vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; + /* Disable Primary/Sprite/Cursor plane */ + for_each_pipe(dev_priv, pipe) { + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; + vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE; + } + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; } -- cgit From 10996f802109c83421ca30556cfe36ffc3bebae3 Mon Sep 17 00:00:00 2001 From: Tina Zhang Date: Wed, 28 Mar 2018 13:49:29 +0800 Subject: drm/i915/gvt: Add drm_format_mod update Add drm_format_mod update, which is omitted. Fixes: e546e281("drm/i915/gvt: Dmabuf support for GVT-g") Cc: stable@vger.kernel.org Signed-off-by: Tina Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index b555eb26f9ce..6f4f8e941fc2 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf, struct intel_vgpu_fb_info *fb_info) { gvt_dmabuf->drm_format = fb_info->drm_format; + gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod; gvt_dmabuf->width = fb_info->width; gvt_dmabuf->height = fb_info->height; gvt_dmabuf->stride = fb_info->stride; -- cgit From 38057aa1639b74d5c1e0dc1ca7c22bc7a31de714 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 24 Mar 2018 12:58:29 +0000 Subject: drm/i915/execlists: Clear user-active flag on preemption completion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When cancelling the requests and clearing out the ports following a successful preemption completion, also clear the active flag. I had assumed that all preemptions would be followed by an immediate dequeue (preserving the active user flag), but under rare circumstances we may be triggering a preemption for the second port only for it to have completed before the preemotion kicks in; leaving execlists->active set even though the system is now idle. We can clear the flag inside the common execlists_cancel_port_requests() as the other users also expect the semantics of active being cleared. 
Fixes: f6322eddaff7 ("drm/i915/preemption: Allow preemption between submission ports") Signed-off-by: Chris Wilson Cc: Michał Winiarski Cc: Michel Thierry Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180324125829.27026-1-chris@chris-wilson.co.uk (cherry picked from commit eed7ec52f214bac2f25395ccaad610fbeb842a6e) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/intel_lrc.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 697af5add78b..e3a5f673ff67 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -577,6 +577,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * know the next preemption status we see corresponds * to this ELSP update. */ + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_USER)); GEM_BUG_ON(!port_count(&port[0])); if (port_count(&port[0]) > 1) goto unlock; @@ -738,6 +740,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) memset(port, 0, sizeof(*port)); port++; } + + execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); } static void execlists_cancel_requests(struct intel_engine_cs *engine) @@ -1001,6 +1005,11 @@ static void execlists_submission_tasklet(unsigned long data) if (fw) intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); + + /* If the engine is now idle, so should be the flag; and vice versa. */ + GEM_BUG_ON(execlists_is_active(&engine->execlists, + EXECLISTS_ACTIVE_USER) == + !port_isset(engine->execlists.port)); } static void queue_request(struct intel_engine_cs *engine, -- cgit From c0db1b677e1d584fab5d7ac76a32e1c0157542e0 Mon Sep 17 00:00:00 2001 From: Daniel J Blueman Date: Mon, 2 Apr 2018 15:10:35 +0800 Subject: drm/vc4: Fix memory leak during BO teardown During BO teardown, an indirect list 'uniform_addr_offsets' wasn't being freed leading to leaking many 128B allocations. Fix the memory leak by releasing it at teardown time. 
Cc: stable@vger.kernel.org Fixes: 6d45c81d229d ("drm/vc4: Add support for branching in shader validation.") Signed-off-by: Daniel J Blueman Signed-off-by: Eric Anholt Reviewed-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/20180402071035.25356-1-daniel@quora.org --- drivers/gpu/drm/vc4/vc4_bo.c | 2 ++ drivers/gpu/drm/vc4/vc4_validate_shaders.c | 1 + 2 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 2decc8e2c79f..add9cc97a3b6 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo) vc4_bo_set_label(obj, -1); if (bo->validated_shader) { + kfree(bo->validated_shader->uniform_addr_offsets); kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; @@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo) } if (bo->validated_shader) { + kfree(bo->validated_shader->uniform_addr_offsets); kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index d3f15bf60900..7cf82b071de2 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) fail: kfree(validation_state.branch_targets); if (validated_shader) { + kfree(validated_shader->uniform_addr_offsets); kfree(validated_shader->texture_samples); kfree(validated_shader); } -- cgit From 43838a23a05fbd13e47d750d3dfd77001536dd33 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 11 Apr 2018 13:27:52 -0400 Subject: random: fix crng_ready() test The crng_init variable has three states: 0: The CRNG is not initialized at all 1: The CRNG has a small amount of entropy, hopefully good enough for early-boot, non-cryptographical use cases 2: The CRNG is fully initialized and we are sure it is safe for cryptographic use cases. The crng_ready() function should only return true once we are in the last state. This addresses CVE-2018-1108. Reported-by: Jann Horn Fixes: e192be9d9a30 ("random: replace non-blocking pool...") Cc: stable@kernel.org # 4.8+ Signed-off-by: Theodore Ts'o Reviewed-by: Jann Horn --- drivers/char/random.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index e027e7fa1472..c8ec1e70abde 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -427,7 +427,7 @@ struct crng_state primary_crng = { * its value (from 0->1->2). */ static int crng_init = 0; -#define crng_ready() (likely(crng_init > 0)) +#define crng_ready() (likely(crng_init > 1)) static int crng_init_cnt = 0; #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) static void _extract_crng(struct crng_state *crng, @@ -794,7 +794,7 @@ static int crng_fast_load(const char *cp, size_t len) if (!spin_trylock_irqsave(&primary_crng.lock, flags)) return 0; - if (crng_ready()) { + if (crng_init != 0) { spin_unlock_irqrestore(&primary_crng.lock, flags); return 0; } @@ -856,7 +856,7 @@ static void _extract_crng(struct crng_state *crng, { unsigned long v, flags; - if (crng_init > 1 && + if (crng_ready() && time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) crng_reseed(crng, crng == &primary_crng ? 
&input_pool : NULL); spin_lock_irqsave(&crng->lock, flags); @@ -1139,7 +1139,7 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_mix(fast_pool); add_interrupt_bench(cycles); - if (!crng_ready()) { + if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && crng_fast_load((char *) fast_pool->pool, sizeof(fast_pool->pool))) { @@ -2212,7 +2212,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, { struct entropy_store *poolp = &input_pool; - if (!crng_ready()) { + if (unlikely(crng_init == 0)) { crng_fast_load(buffer, count); return; } -- cgit From dc12baacb95f205948f64dc936a47d89ee110117 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 11 Apr 2018 14:58:27 -0400 Subject: random: use a different mixing algorithm for add_device_randomness() add_device_randomness() use of crng_fast_load() was highly problematic. Some callers of add_device_randomness() can pass in a large amount of static information. This would immediately promote the crng_init state from 0 to 1, without really doing much to initialize the primary_crng's internal state with something even vaguely unpredictable. Since we don't have the speed constraints of add_interrupt_randomness(), we can do a better job mixing in the what unpredictability a device driver or architecture maintainer might see fit to give us, and do it in a way which does not bump the crng_init_cnt variable. Also, since add_device_randomness() doesn't bump any entropy accounting in crng_init state 0, mix the device randomness into the input_pool entropy pool as well. This is related to CVE-2018-1108. Reported-by: Jann Horn Fixes: ee7998c50c26 ("random: do not ignore early device randomness") Cc: stable@kernel.org # 4.13+ Signed-off-by: Theodore Ts'o --- drivers/char/random.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index c8ec1e70abde..6baa828c0493 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -787,6 +787,10 @@ static void crng_initialize(struct crng_state *crng) crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; } +/* + * crng_fast_load() can be called by code in the interrupt service + * path. So we can't afford to dilly-dally. + */ static int crng_fast_load(const char *cp, size_t len) { unsigned long flags; @@ -813,6 +817,51 @@ static int crng_fast_load(const char *cp, size_t len) return 1; } +/* + * crng_slow_load() is called by add_device_randomness, which has two + * attributes. (1) We can't trust the buffer passed to it is + * guaranteed to be unpredictable (so it might not have any entropy at + * all), and (2) it doesn't have the performance constraints of + * crng_fast_load(). + * + * So we do something more comprehensive which is guaranteed to touch + * all of the primary_crng's state, and which uses a LFSR with a + * period of 255 as part of the mixing algorithm. Finally, we do + * *not* advance crng_init_cnt since buffer we may get may be something + * like a fixed DMI table (for example), which might very well be + * unique to the machine, but is otherwise unvarying. 
+ */ +static int crng_slow_load(const char *cp, size_t len) +{ + unsigned long flags; + static unsigned char lfsr = 1; + unsigned char tmp; + unsigned i, max = CHACHA20_KEY_SIZE; + const char * src_buf = cp; + char * dest_buf = (char *) &primary_crng.state[4]; + + if (!spin_trylock_irqsave(&primary_crng.lock, flags)) + return 0; + if (crng_init != 0) { + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 0; + } + if (len > max) + max = len; + + for (i = 0; i < max ; i++) { + tmp = lfsr; + lfsr >>= 1; + if (tmp & 1) + lfsr ^= 0xE1; + tmp = dest_buf[i % CHACHA20_KEY_SIZE]; + dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr; + lfsr += (tmp << 3) | (tmp >> 5); + } + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 1; +} + static void crng_reseed(struct crng_state *crng, struct entropy_store *r) { unsigned long flags; @@ -981,10 +1030,8 @@ void add_device_randomness(const void *buf, unsigned int size) unsigned long time = random_get_entropy() ^ jiffies; unsigned long flags; - if (!crng_ready()) { - crng_fast_load(buf, size); - return; - } + if (!crng_ready() && size) + crng_slow_load(buf, size); trace_add_device_randomness(size, _RET_IP_); spin_lock_irqsave(&input_pool.lock, flags); -- cgit From 8ef35c866f8862df074a49a93b0309725812dea8 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 11 Apr 2018 15:23:56 -0400 Subject: random: set up the NUMA crng instances after the CRNG is fully initialized Until the primary_crng is fully initialized, don't initialize the NUMA crng nodes. Otherwise users of /dev/urandom on NUMA systems before the CRNG is fully initialized can get very bad quality randomness. Of course everyone should move to getrandom(2) where this won't be an issue, but there's a lot of legacy code out there. This related to CVE-2018-1108. Reported-by: Jann Horn Fixes: 1e7f583af67b ("random: make /dev/urandom scalable for silly...") Cc: stable@kernel.org # 4.8+ Signed-off-by: Theodore Ts'o --- drivers/char/random.c | 46 +++++++++++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 6baa828c0493..02d792f7933f 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -787,6 +787,32 @@ static void crng_initialize(struct crng_state *crng) crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; } +#ifdef CONFIG_NUMA +static void numa_crng_init(void) +{ + int i; + struct crng_state *crng; + struct crng_state **pool; + + pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); + for_each_online_node(i) { + crng = kmalloc_node(sizeof(struct crng_state), + GFP_KERNEL | __GFP_NOFAIL, i); + spin_lock_init(&crng->lock); + crng_initialize(crng); + pool[i] = crng; + } + mb(); + if (cmpxchg(&crng_node_pool, NULL, pool)) { + for_each_node(i) + kfree(pool[i]); + kfree(pool); + } +} +#else +static void numa_crng_init(void) {} +#endif + /* * crng_fast_load() can be called by code in the interrupt service * path. So we can't afford to dilly-dally. 
@@ -893,6 +919,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) spin_unlock_irqrestore(&primary_crng.lock, flags); if (crng == &primary_crng && crng_init < 2) { invalidate_batched_entropy(); + numa_crng_init(); crng_init = 2; process_random_ready_list(); wake_up_interruptible(&crng_init_wait); @@ -1727,28 +1754,9 @@ static void init_std_data(struct entropy_store *r) */ static int rand_initialize(void) { -#ifdef CONFIG_NUMA - int i; - struct crng_state *crng; - struct crng_state **pool; -#endif - init_std_data(&input_pool); init_std_data(&blocking_pool); crng_initialize(&primary_crng); - -#ifdef CONFIG_NUMA - pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); - for_each_online_node(i) { - crng = kmalloc_node(sizeof(struct crng_state), - GFP_KERNEL | __GFP_NOFAIL, i); - spin_lock_init(&crng->lock); - crng_initialize(crng); - pool[i] = crng; - } - mb(); - crng_node_pool = pool; -#endif return 0; } early_initcall(rand_initialize); -- cgit From 0bb29a849a6433b72e249eea7695477b02056e94 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Thu, 12 Apr 2018 00:50:45 -0400 Subject: random: crng_reseed() should lock the crng instance that it is modifying Reported-by: Jann Horn Fixes: 1e7f583af67b ("random: make /dev/urandom scalable for silly...") Cc: stable@kernel.org # 4.8+ Signed-off-by: Theodore Ts'o Reviewed-by: Jann Horn --- drivers/char/random.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 02d792f7933f..898233f594b4 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -906,7 +906,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) _crng_backtrack_protect(&primary_crng, buf.block, CHACHA20_KEY_SIZE); } - spin_lock_irqsave(&primary_crng.lock, flags); + spin_lock_irqsave(&crng->lock, flags); for (i = 0; i < 8; i++) { unsigned long rv; if (!arch_get_random_seed_long(&rv) && @@ -916,7 +916,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) } memzero_explicit(&buf, sizeof(buf)); crng->init_time = jiffies; - spin_unlock_irqrestore(&primary_crng.lock, flags); + spin_unlock_irqrestore(&crng->lock, flags); if (crng == &primary_crng && crng_init < 2) { invalidate_batched_entropy(); numa_crng_init(); -- cgit From d848e5f8e1ebdb227d045db55fe4f825e82965fa Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 11 Apr 2018 16:32:17 -0400 Subject: random: add new ioctl RNDRESEEDCRNG Add a new ioctl which forces the the crng to be reseeded. 
Signed-off-by: Theodore Ts'o Cc: stable@kernel.org --- drivers/char/random.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/char/random.c b/drivers/char/random.c index 898233f594b4..3cd3aae24d6d 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -429,6 +429,7 @@ struct crng_state primary_crng = { static int crng_init = 0; #define crng_ready() (likely(crng_init > 1)) static int crng_init_cnt = 0; +static unsigned long crng_global_init_time = 0; #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) static void _extract_crng(struct crng_state *crng, __u32 out[CHACHA20_BLOCK_WORDS]); @@ -933,7 +934,8 @@ static void _extract_crng(struct crng_state *crng, unsigned long v, flags; if (crng_ready() && - time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) + (time_after(crng_global_init_time, crng->init_time) || + time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))) crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL); spin_lock_irqsave(&crng->lock, flags); if (arch_get_random_long(&v)) @@ -1757,6 +1759,7 @@ static int rand_initialize(void) init_std_data(&input_pool); init_std_data(&blocking_pool); crng_initialize(&primary_crng); + crng_global_init_time = jiffies; return 0; } early_initcall(rand_initialize); @@ -1930,6 +1933,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) input_pool.entropy_count = 0; blocking_pool.entropy_count = 0; return 0; + case RNDRESEEDCRNG: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (crng_init < 2) + return -ENODATA; + crng_reseed(&primary_crng, NULL); + crng_global_init_time = jiffies - 1; + return 0; default: return -EINVAL; } -- cgit From a03cc689e5c9d89d500f4a4dae1a81ea512dbb25 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 27 Mar 2018 14:25:40 -0500 Subject: watchdog: sch311x_wdt: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Notice that in this particular case I replaced "Fall" with a proper "Fall through" comment, which is what GCC is expecting to find. Signed-off-by: Gustavo A. R. Silva Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- drivers/watchdog/sch311x_wdt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c index 43d0cbb7ba0b..814cdf539b0f 100644 --- a/drivers/watchdog/sch311x_wdt.c +++ b/drivers/watchdog/sch311x_wdt.c @@ -299,7 +299,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd, if (sch311x_wdt_set_heartbeat(new_timeout)) return -EINVAL; sch311x_wdt_keepalive(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: -- cgit From 3ff0751fa04208459b74cfbe492df79468b8425c Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 27 Mar 2018 14:30:41 -0500 Subject: watchdog: w83977f_wdt: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Notice that in this particular case I replaced "Fall" with a proper "Fall through" comment, which is what GCC is expecting to find. Signed-off-by: Gustavo A. R. 
Silva Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- drivers/watchdog/w83977f_wdt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 20e2bba10400..672b61a7f9a3 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c @@ -427,7 +427,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -EINVAL; wdt_keepalive(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, uarg.i); -- cgit From e30d69df78fb2667dc58e906cabd0f70ed0af95d Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 27 Mar 2018 14:33:49 -0500 Subject: watchdog: wafer5823wdt: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Notice that in this particular case I replaced "Fall" with a proper "Fall through" comment, which is what GCC is expecting to find. Signed-off-by: Gustavo A. R. Silva Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- drivers/watchdog/wafer5823wdt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c index db0da7ea4fd8..93c5b610e264 100644 --- a/drivers/watchdog/wafer5823wdt.c +++ b/drivers/watchdog/wafer5823wdt.c @@ -178,7 +178,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd, timeout = new_timeout; wafwdt_stop(); wafwdt_start(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, p); -- cgit From fdac6a90d2d151abdbb7e5ec14bb9ab64e2931ec Mon Sep 17 00:00:00 2001 From: Veeraiyan Chidambaram Date: Fri, 13 Apr 2018 16:19:24 +0200 Subject: watchdog: renesas-wdt: Add support for WDIOF_CARDRESET This patch adds the WDIOF_CARDRESET support for the Renesas platform watchdog, to know if the board reboot is due to a watchdog reset. This is done via the WOVF bit (bit 4) of the RWTCSRA register, which indicates if RWTCNT overflowed, triggering the reset in last boot. Signed-off-by: Veeraiyan Chidambaram [takeshi.kihara.df: changed to read the RWTCSRA register while clock is enabled] Signed-off-by: Takeshi Kihara Signed-off-by: Wolfram Sang Reviewed-by: Guenter Roeck Reviewed-by: Vladimir Zapolskiy Reviewed-by: Geert Uytterhoeven Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- drivers/watchdog/renesas_wdt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c index 6b8c6ddfe30b..514db5cc1595 100644 --- a/drivers/watchdog/renesas_wdt.c +++ b/drivers/watchdog/renesas_wdt.c @@ -121,7 +121,8 @@ static int rwdt_restart(struct watchdog_device *wdev, unsigned long action, } static const struct watchdog_info rwdt_ident = { - .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, + .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | + WDIOF_CARDRESET, .identity = "Renesas WDT Watchdog", }; @@ -197,9 +198,10 @@ static int rwdt_probe(struct platform_device *pdev) return PTR_ERR(clk); pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); priv->clk_rate = clk_get_rate(clk); + priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) & + RWTCSRA_WOVF) ? 
WDIOF_CARDRESET : 0; pm_runtime_put(&pdev->dev); if (!priv->clk_rate) { -- cgit From 49d4d277ca54e04170d39484c8758a0ea9bca37d Mon Sep 17 00:00:00 2001 From: Eddie James Date: Tue, 27 Mar 2018 15:09:27 -0500 Subject: aspeed: watchdog: Set bootstatus during probe Check the aspeed timeout status register to see if the system has booted from the secondary boot source. If so, set the watchdog device bootstatus flag for "Card previously reset the CPU." Signed-off-by: Eddie James Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- drivers/watchdog/aspeed_wdt.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c index a5b8eb21201f..1abe4d021fd2 100644 --- a/drivers/watchdog/aspeed_wdt.c +++ b/drivers/watchdog/aspeed_wdt.c @@ -55,6 +55,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table); #define WDT_CTRL_WDT_INTR BIT(2) #define WDT_CTRL_RESET_SYSTEM BIT(1) #define WDT_CTRL_ENABLE BIT(0) +#define WDT_TIMEOUT_STATUS 0x10 +#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1) /* * WDT_RESET_WIDTH controls the characteristics of the external pulse (if @@ -192,6 +194,7 @@ static int aspeed_wdt_probe(struct platform_device *pdev) struct device_node *np; const char *reset_type; u32 duration; + u32 status; int ret; wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); @@ -307,6 +310,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev) writel(duration - 1, wdt->base + WDT_RESET_WIDTH); } + status = readl(wdt->base + WDT_TIMEOUT_STATUS); + if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) + wdt->wdd.bootstatus = WDIOF_CARDRESET; + ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); if (ret) { dev_err(&pdev->dev, "failed to register\n"); -- cgit From e7c5a571a8d6a266aee9ca3f3f26e5afe3717eca Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Apr 2018 12:34:24 -0700 Subject: libnvdimm, dimm: handle EACCES failures from label reads The new support for the standard _LSR and _LSW methods neglected to also update the nvdimm_init_config_data() and nvdimm_set_config_data() to return the translated error code from failed commands. This precision is necessary because the locked status that was previously returned on ND_CMD_GET_CONFIG_SIZE commands is now returned on ND_CMD_{GET,SET}_CONFIG_DATA commands. If the kernel misses this indication it can inadvertently fall back to label-less mode when it should otherwise avoid all access to locked regions. 
Cc: Fixes: 4b27db7e26cd ("acpi, nfit: add support for the _LSI, _LSR, and...") Signed-off-by: Dan Williams --- drivers/nvdimm/dimm_devs.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index e00d45522b80..8d348b22ba45 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd) int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) { struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); + int rc = validate_dimm(ndd), cmd_rc = 0; struct nd_cmd_get_config_data_hdr *cmd; struct nvdimm_bus_descriptor *nd_desc; - int rc = validate_dimm(ndd); u32 max_cmd_size, config_size; size_t offset; @@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) cmd->in_offset = offset; rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), ND_CMD_GET_CONFIG_DATA, cmd, - cmd->in_length + sizeof(*cmd), NULL); - if (rc || cmd->status) { - rc = -ENXIO; + cmd->in_length + sizeof(*cmd), &cmd_rc); + if (rc < 0) + break; + if (cmd_rc < 0) { + rc = cmd_rc; break; } memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); @@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, void *buf, size_t len) { - int rc = validate_dimm(ndd); size_t max_cmd_size, buf_offset; struct nd_cmd_set_config_hdr *cmd; + int rc = validate_dimm(ndd), cmd_rc = 0; struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; @@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, for (buf_offset = 0; len; len -= cmd->in_length, buf_offset += cmd->in_length) { size_t cmd_size; - u32 *status; cmd->in_offset = offset + buf_offset; cmd->in_length = min(max_cmd_size, len); @@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, /* status is output in the last 4-bytes of the command buffer */ cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32); - status = ((void *) cmd) + cmd_size - sizeof(u32); rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), - ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL); - if (rc || *status) { - rc = rc ? rc : -ENXIO; + ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc); + if (rc < 0) + break; + if (cmd_rc < 0) { + rc = cmd_rc; break; } } -- cgit From b11954a6971baa5842e24e6c0dcf56f117249638 Mon Sep 17 00:00:00 2001 From: Daniel Stone Date: Fri, 30 Mar 2018 15:11:30 +0100 Subject: drm/exynos: Move GEM BOs to drm_framebuffer Since drm_framebuffer can now store GEM objects directly, place them there rather than in our own subclass. As this makes the framebuffer create_handle and destroy functions the same as the GEM framebuffer helper, we can reuse those. 
Signed-off-by: Daniel Stone Signed-off-by: Inki Dae Cc: Inki Dae Cc: Joonyoung Shim Cc: Seung-Woo Kim Cc: Kyungmin Park --- drivers/gpu/drm/exynos/exynos_drm_fb.c | 39 ++++------------------------------ 1 file changed, 4 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 0faaf829f5bf..f28ce493e314 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include "exynos_drm_drv.h" @@ -36,7 +37,6 @@ */ struct exynos_drm_fb { struct drm_framebuffer fb; - struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; dma_addr_t dma_addr[MAX_FB_BUFFER]; }; @@ -66,40 +66,9 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev, return 0; } -static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) -{ - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - unsigned int i; - - drm_framebuffer_cleanup(fb); - - for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) { - struct drm_gem_object *obj; - - if (exynos_fb->exynos_gem[i] == NULL) - continue; - - obj = &exynos_fb->exynos_gem[i]->base; - drm_gem_object_unreference_unlocked(obj); - } - - kfree(exynos_fb); - exynos_fb = NULL; -} - -static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb, - struct drm_file *file_priv, - unsigned int *handle) -{ - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - - return drm_gem_handle_create(file_priv, - &exynos_fb->exynos_gem[0]->base, handle); -} - static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = { - .destroy = exynos_drm_fb_destroy, - .create_handle = exynos_drm_fb_create_handle, + .destroy = drm_gem_fb_destroy, + .create_handle = drm_gem_fb_create_handle, }; struct drm_framebuffer * @@ -121,7 +90,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev, if (ret < 0) goto err; - exynos_fb->exynos_gem[i] = exynos_gem[i]; + exynos_fb->fb.obj[i] = &exynos_gem[i]->base; exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr + mode_cmd->offsets[i]; } -- cgit From 7b30508f5116574f94b50c71d3da1089d145e603 Mon Sep 17 00:00:00 2001 From: Daniel Stone Date: Fri, 30 Mar 2018 15:11:31 +0100 Subject: drm/exynos: Move dma_addr out of exynos_drm_fb This can be calculated from the GEM BO DMA address as well as the offset stored in the base framebuffer. 
Signed-off-by: Daniel Stone Signed-off-by: Inki Dae Cc: Inki Dae Cc: Joonyoung Shim Cc: Seung-Woo Kim Cc: Kyungmin Park --- drivers/gpu/drm/exynos/exynos_drm_fb.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index f28ce493e314..168c71f80b72 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -37,7 +37,6 @@ */ struct exynos_drm_fb { struct drm_framebuffer fb; - dma_addr_t dma_addr[MAX_FB_BUFFER]; }; static int check_fb_gem_memory_type(struct drm_device *drm_dev, @@ -91,8 +90,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev, goto err; exynos_fb->fb.obj[i] = &exynos_gem[i]->base; - exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr - + mode_cmd->offsets[i]; } drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); @@ -160,12 +157,13 @@ err: dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) { - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); + struct exynos_drm_gem *exynos_gem; if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) return 0; - return exynos_fb->dma_addr[index]; + exynos_gem = to_exynos_gem(fb->obj[index]); + return exynos_gem->dma_addr + fb->offsets[index]; } static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = { -- cgit From ff059fcbeed9cbed7421f82d1463dd74c472636e Mon Sep 17 00:00:00 2001 From: Daniel Stone Date: Fri, 30 Mar 2018 15:11:32 +0100 Subject: drm/exynos: exynos_drm_fb -> drm_framebuffer Now exynos_drm_fb is just an empty wrapper around drm_framebuffer, we can drop it. Signed-off-by: Daniel Stone Signed-off-by: Inki Dae Cc: Inki Dae Cc: Joonyoung Shim Cc: Seung-Woo Kim Cc: Kyungmin Park --- drivers/gpu/drm/exynos/exynos_drm_fb.c | 28 ++++++++-------------------- 1 file changed, 8 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 168c71f80b72..f0e79178bde6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -27,18 +27,6 @@ #include "exynos_drm_iommu.h" #include "exynos_drm_crtc.h" -#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) - -/* - * exynos specific framebuffer structure. - * - * @fb: drm framebuffer obejct. - * @exynos_gem: array of exynos specific gem object containing a gem object. 
- */ -struct exynos_drm_fb { - struct drm_framebuffer fb; -}; - static int check_fb_gem_memory_type(struct drm_device *drm_dev, struct exynos_drm_gem *exynos_gem) { @@ -76,12 +64,12 @@ exynos_drm_framebuffer_init(struct drm_device *dev, struct exynos_drm_gem **exynos_gem, int count) { - struct exynos_drm_fb *exynos_fb; + struct drm_framebuffer *fb; int i; int ret; - exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); - if (!exynos_fb) + fb = kzalloc(sizeof(*fb), GFP_KERNEL); + if (!fb) return ERR_PTR(-ENOMEM); for (i = 0; i < count; i++) { @@ -89,21 +77,21 @@ exynos_drm_framebuffer_init(struct drm_device *dev, if (ret < 0) goto err; - exynos_fb->fb.obj[i] = &exynos_gem[i]->base; + fb->obj[i] = &exynos_gem[i]->base; } - drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); - ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); + ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs); if (ret < 0) { DRM_ERROR("failed to initialize framebuffer\n"); goto err; } - return &exynos_fb->fb; + return fb; err: - kfree(exynos_fb); + kfree(fb); return ERR_PTR(ret); } -- cgit From fcf1fadf4c65eea6c519c773d2d9901e8ad94f5f Mon Sep 17 00:00:00 2001 From: Xidong Wang Date: Wed, 4 Apr 2018 10:38:24 +0100 Subject: drm/i915: Do no use kfree() to free a kmem_cache_alloc() return value Along the eb_lookup_vmas() error path, the return value from kmem_cache_alloc() was freed using kfree(). Fix it to use the proper kmem_cache_free() instead. Fixes: d1b48c1e7184 ("drm/i915: Replace execbuf vma ht with an idr") Signed-off-by: Xidong Wang Cc: Chris Wilson Cc: Tvrtko Ursulin Cc: # v4.14+ Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180404093824.9313-1-chris@chris-wilson.co.uk (cherry picked from commit 6be1187dbffa0027ea379c53f7ca0c782515c610) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 8c170db8495d..0414228cd2b5 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) err = radix_tree_insert(handles_vma, handle, vma); if (unlikely(err)) { - kfree(lut); + kmem_cache_free(eb->i915->luts, lut); goto err_obj; } -- cgit From e6be6bd85654dba55b97758f937c46835d961a44 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 10 Apr 2018 12:27:04 +0100 Subject: drm/i915/pmu: Inspect runtime PM state more carefully while estimating RC6 While thinking about sporadic failures of perf_pmu/rc6-runtime-pm* tests on some CI machines I have concluded that: a) the PMU readout of RC6 can race against runtime PM transitions, and b) there are other reasons than being runtime suspended which can cause intel_runtime_pm_get_if_in_use to fail. Therefore when estimating RC6 the code needs to assert we are indeed in suspended state, and if not, the best we can do is return the last known RC6 value. Without this check we can calculate the estimated value based on un- initialized or inappropriate internal state, which can result in over- estimation, or in any case incorrect value being returned. v2: * Re-arrange the code a bit to avoid second unlock and return branch. (Chris Wilson) v3: * Insert some strategic blank lines and improve commit msg. 
(Chris Wilson) Signed-off-by: Tvrtko Ursulin Fixes: 1fe699e30113 ("drm/i915/pmu: Fix sleep under atomic in RC6 readout") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105010 Cc: Tvrtko Ursulin Cc: Chris Wilson Cc: Imre Deak Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180410112704.24462-1-tvrtko.ursulin@linux.intel.com (cherry picked from commit 2924bdee21edd6785a4df1b4d17fd3cb265fddd9) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_pmu.c | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d8feb9053e0c..f0519e31543a 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -473,20 +473,37 @@ static u64 get_rc6(struct drm_i915_private *i915) spin_lock_irqsave(&i915->pmu.lock, flags); spin_lock(&kdev->power.lock); - if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) - i915->pmu.suspended_jiffies_last = - kdev->power.suspended_jiffies; + /* + * After the above branch intel_runtime_pm_get_if_in_use failed + * to get the runtime PM reference we cannot assume we are in + * runtime suspend since we can either: a) race with coming out + * of it before we took the power.lock, or b) there are other + * states than suspended which can bring us here. + * + * We need to double-check that we are indeed currently runtime + * suspended and if not we cannot do better than report the last + * known RC6 value. + */ + if (kdev->power.runtime_status == RPM_SUSPENDED) { + if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) + i915->pmu.suspended_jiffies_last = + kdev->power.suspended_jiffies; - val = kdev->power.suspended_jiffies - - i915->pmu.suspended_jiffies_last; - val += jiffies - kdev->power.accounting_timestamp; + val = kdev->power.suspended_jiffies - + i915->pmu.suspended_jiffies_last; + val += jiffies - kdev->power.accounting_timestamp; - spin_unlock(&kdev->power.lock); + val = jiffies_to_nsecs(val); + val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; - val = jiffies_to_nsecs(val); - val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; - i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { + val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; + } else { + val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; + } + spin_unlock(&kdev->power.lock); spin_unlock_irqrestore(&i915->pmu.lock, flags); } -- cgit From a3520b8992e57bc94ab6ec9f95f09c6c932555fd Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 11 Apr 2018 16:15:18 +0300 Subject: drm/i915/bios: filter out invalid DDC pins from VBT child devices The VBT contains the DDC pin to use for specific ports. Alas, sometimes the field appears to contain bogus data, and while we check for it later on in intel_gmbus_get_adapter() we fail to check the returned NULL on errors. Oops results. The simplest approach seems to be to catch and ignore the bogus DDC pins already at the VBT parsing phase, reverting to fixed per port default pins. This doesn't guarantee display working, but at least it prevents the oops. And we continue to be fuzzed by VBT. One affected machine is Dell Latitude 5590 where a BIOS upgrade added invalid DDC pins. 
Typical backtrace: [ 35.461411] WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin)) [ 35.461432] WARNING: CPU: 6 PID: 411 at drivers/gpu/drm/i915/intel_i2c.c:844 intel_gmbus_get_adapter+0x32/0x37 [i915] [ 35.461437] Modules linked in: i915 ahci libahci dm_snapshot dm_bufio dm_raid raid456 async_raid6_recov async_pq raid6_pq async_xor xor async_memcpy async_tx [ 35.461445] CPU: 6 PID: 411 Comm: kworker/u16:2 Not tainted 4.16.0-rc7.x64-g1cda370ffded #1 [ 35.461447] Hardware name: Dell Inc. Latitude 5590/0MM81M, BIOS 1.1.9 03/13/2018 [ 35.461450] Workqueue: events_unbound async_run_entry_fn [ 35.461465] RIP: 0010:intel_gmbus_get_adapter+0x32/0x37 [i915] [ 35.461467] RSP: 0018:ffff9b4e43d47c40 EFLAGS: 00010286 [ 35.461469] RAX: 0000000000000000 RBX: ffff98f90639f800 RCX: ffffffffae051960 [ 35.461471] RDX: 0000000000000001 RSI: 0000000000000092 RDI: 0000000000000246 [ 35.461472] RBP: ffff98f905410000 R08: 0000004d062a83f6 R09: 00000000000003bd [ 35.461474] R10: 0000000000000031 R11: ffffffffad4eda58 R12: ffff98f905410000 [ 35.461475] R13: ffff98f9064c1000 R14: ffff9b4e43d47cf0 R15: ffff98f905410000 [ 35.461477] FS: 0000000000000000(0000) GS:ffff98f92e580000(0000) knlGS:0000000000000000 [ 35.461479] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 35.461481] CR2: 00007f5682359008 CR3: 00000001b700c005 CR4: 00000000003606e0 [ 35.461483] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 35.461484] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 35.461486] Call Trace: [ 35.461501] intel_hdmi_set_edid+0x37/0x27f [i915] [ 35.461515] intel_hdmi_detect+0x7c/0x97 [i915] [ 35.461518] drm_helper_probe_single_connector_modes+0xe1/0x6c0 [ 35.461521] drm_setup_crtcs+0x129/0xa6a [ 35.461523] ? __switch_to_asm+0x34/0x70 [ 35.461525] ? __switch_to_asm+0x34/0x70 [ 35.461527] ? __switch_to_asm+0x40/0x70 [ 35.461528] ? __switch_to_asm+0x34/0x70 [ 35.461529] ? __switch_to_asm+0x40/0x70 [ 35.461531] ? __switch_to_asm+0x34/0x70 [ 35.461532] ? __switch_to_asm+0x40/0x70 [ 35.461534] ? __switch_to_asm+0x34/0x70 [ 35.461536] __drm_fb_helper_initial_config_and_unlock+0x34/0x46f [ 35.461538] ? __switch_to_asm+0x40/0x70 [ 35.461541] ? _cond_resched+0x10/0x33 [ 35.461557] intel_fbdev_initial_config+0xf/0x1c [i915] [ 35.461560] async_run_entry_fn+0x2e/0xf5 [ 35.461563] process_one_work+0x15b/0x364 [ 35.461565] worker_thread+0x2c/0x3a0 [ 35.461567] ? process_one_work+0x364/0x364 [ 35.461568] kthread+0x10c/0x122 [ 35.461570] ? _kthread_create_on_node+0x5d/0x5d [ 35.461572] ret_from_fork+0x35/0x40 [ 35.461574] Code: 74 16 89 f6 48 8d 04 b6 48 c1 e0 05 48 29 f0 48 8d 84 c7 e8 11 00 00 c3 48 c7 c6 b0 19 1e c0 48 c7 c7 64 8a 1c c0 e8 47 88 ed ec <0f> 0b 31 c0 c3 8b 87 a4 04 00 00 80 e4 fc 09 c6 89 b7 a4 04 00 [ 35.461604] WARNING: CPU: 6 PID: 411 at drivers/gpu/drm/i915/intel_i2c.c:844 intel_gmbus_get_adapter+0x32/0x37 [i915] [ 35.461606] ---[ end trace 4fe1e63e2dd93373 ]--- [ 35.461609] BUG: unable to handle kernel NULL pointer dereference at 0000000000000010 [ 35.461613] IP: i2c_transfer+0x4/0x86 [ 35.461614] PGD 0 P4D 0 [ 35.461616] Oops: 0000 [#1] SMP PTI [ 35.461618] Modules linked in: i915 ahci libahci dm_snapshot dm_bufio dm_raid raid456 async_raid6_recov async_pq raid6_pq async_xor xor async_memcpy async_tx [ 35.461624] CPU: 6 PID: 411 Comm: kworker/u16:2 Tainted: G W 4.16.0-rc7.x64-g1cda370ffded #1 [ 35.461625] Hardware name: Dell Inc. 
Latitude 5590/0MM81M, BIOS 1.1.9 03/13/2018 [ 35.461628] Workqueue: events_unbound async_run_entry_fn [ 35.461630] RIP: 0010:i2c_transfer+0x4/0x86 [ 35.461631] RSP: 0018:ffff9b4e43d47b30 EFLAGS: 00010246 [ 35.461633] RAX: ffff9b4e43d47b6e RBX: 0000000000000005 RCX: 0000000000000001 [ 35.461635] RDX: 0000000000000002 RSI: ffff9b4e43d47b80 RDI: 0000000000000000 [ 35.461636] RBP: ffff9b4e43d47bd8 R08: 0000004d062a83f6 R09: 00000000000003bd [ 35.461638] R10: 0000000000000031 R11: ffffffffad4eda58 R12: 0000000000000002 [ 35.461639] R13: 0000000000000001 R14: ffff9b4e43d47b6f R15: ffff9b4e43d47c07 [ 35.461641] FS: 0000000000000000(0000) GS:ffff98f92e580000(0000) knlGS:0000000000000000 [ 35.461643] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 35.461645] CR2: 0000000000000010 CR3: 00000001b700c005 CR4: 00000000003606e0 [ 35.461646] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 35.461647] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 35.461649] Call Trace: [ 35.461652] drm_do_probe_ddc_edid+0xb3/0x128 [ 35.461654] drm_get_edid+0xe5/0x38d [ 35.461669] intel_hdmi_set_edid+0x45/0x27f [i915] [ 35.461684] intel_hdmi_detect+0x7c/0x97 [i915] [ 35.461687] drm_helper_probe_single_connector_modes+0xe1/0x6c0 [ 35.461689] drm_setup_crtcs+0x129/0xa6a [ 35.461691] ? __switch_to_asm+0x34/0x70 [ 35.461693] ? __switch_to_asm+0x34/0x70 [ 35.461694] ? __switch_to_asm+0x40/0x70 [ 35.461696] ? __switch_to_asm+0x34/0x70 [ 35.461697] ? __switch_to_asm+0x40/0x70 [ 35.461698] ? __switch_to_asm+0x34/0x70 [ 35.461700] ? __switch_to_asm+0x40/0x70 [ 35.461701] ? __switch_to_asm+0x34/0x70 [ 35.461703] __drm_fb_helper_initial_config_and_unlock+0x34/0x46f [ 35.461705] ? __switch_to_asm+0x40/0x70 [ 35.461707] ? _cond_resched+0x10/0x33 [ 35.461724] intel_fbdev_initial_config+0xf/0x1c [i915] [ 35.461727] async_run_entry_fn+0x2e/0xf5 [ 35.461729] process_one_work+0x15b/0x364 [ 35.461731] worker_thread+0x2c/0x3a0 [ 35.461733] ? process_one_work+0x364/0x364 [ 35.461734] kthread+0x10c/0x122 [ 35.461736] ? _kthread_create_on_node+0x5d/0x5d [ 35.461738] ret_from_fork+0x35/0x40 [ 35.461739] Code: 5c fa e1 ad 48 89 df e8 ea fb ff ff e9 2a ff ff ff 0f 1f 44 00 00 31 c0 e9 43 fd ff ff 31 c0 45 31 e4 e9 c5 fd ff ff 41 54 55 53 <48> 8b 47 10 48 83 78 10 00 74 70 41 89 d4 48 89 f5 48 89 fb 65 [ 35.461756] RIP: i2c_transfer+0x4/0x86 RSP: ffff9b4e43d47b30 [ 35.461757] CR2: 0000000000000010 [ 35.461759] ---[ end trace 4fe1e63e2dd93374 ]--- Based on a patch by Fei Li. 
v2: s/reverting/sticking/ (Chris) Cc: stable@vger.kernel.org Cc: Fei Li Co-developed-by: Fei Li Reported-by: Pavel Nakonechnyi Reported-and-tested-by: Seweryn Kokot Reported-and-tested-by: Laszlo Valko Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105549 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105961 Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20180411131519.9091-1-jani.nikula@intel.com (cherry picked from commit f212bf9abe5de9f938fecea7df07046e74052dde) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/intel_bios.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index c5c7530ba157..447b721c3be9 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1256,7 +1256,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, return; aux_channel = child->aux_channel; - ddc_pin = child->ddc_pin; is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; @@ -1303,9 +1302,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); if (is_dvi) { - info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin); - - sanitize_ddc_pin(dev_priv, port); + ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin); + if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) { + info->alternate_ddc_pin = ddc_pin; + sanitize_ddc_pin(dev_priv, port); + } else { + DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, " + "sticking to defaults\n", + port_name(port), ddc_pin); + } } if (is_dp) { -- cgit From 4a0559ed99189f259e92ed34f60dd51688f1bc40 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 14 Apr 2018 10:12:33 +0100 Subject: drm/i915: Call i915_perf_fini() on init_hw error unwind We have to cleanup after i915_perf_init(), even on the error path, as it passes a pointer into the module to the sysfs core. If we fail to unregister the sysctl table, we leave a dangling pointer which then may explode anytime later. Fixes: 9f9b2792b6d3 ("drm/i915/perf: reuse timestamp frequency from device info") Signed-off-by: Chris Wilson Cc: Lionel Landwerlin Cc: Matthew Auld Reviewed-by: Lionel Landwerlin Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20180414091233.32224-1-chris@chris-wilson.co.uk (cherry picked from commit 9f172f6fbd243759c808d97bd83c95e49325b2c9) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 07c07d55398b..be8555049c93 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1102,30 +1102,32 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ret = i915_ggtt_probe_hw(dev_priv); if (ret) - return ret; + goto err_perf; - /* WARNING: Apparently we must kick fbdev drivers before vgacon, - * otherwise the vga fbdev driver falls over. */ + /* + * WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. 
+ */ ret = i915_kick_out_firmware_fb(dev_priv); if (ret) { DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); - goto out_ggtt; + goto err_ggtt; } ret = i915_kick_out_vgacon(dev_priv); if (ret) { DRM_ERROR("failed to remove conflicting VGA console\n"); - goto out_ggtt; + goto err_ggtt; } ret = i915_ggtt_init_hw(dev_priv); if (ret) - return ret; + goto err_ggtt; ret = i915_ggtt_enable_hw(dev_priv); if (ret) { DRM_ERROR("failed to enable GGTT\n"); - goto out_ggtt; + goto err_ggtt; } pci_set_master(pdev); @@ -1136,7 +1138,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (ret) { DRM_ERROR("failed to set DMA mask\n"); - goto out_ggtt; + goto err_ggtt; } } @@ -1154,7 +1156,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (ret) { DRM_ERROR("failed to set DMA mask\n"); - goto out_ggtt; + goto err_ggtt; } } @@ -1187,13 +1189,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ret = intel_gvt_init(dev_priv); if (ret) - goto out_ggtt; + goto err_ggtt; return 0; -out_ggtt: +err_ggtt: i915_ggtt_cleanup_hw(dev_priv); - +err_perf: + i915_perf_fini(dev_priv); return ret; } -- cgit From b4615730530be85fc45ab4631c2ad6d8e2d0b97d Mon Sep 17 00:00:00 2001 From: Gaurav K Singh Date: Tue, 17 Apr 2018 23:52:18 +0530 Subject: drm/i915/audio: Fix audio detection issue on GLK On Geminilake, sometimes audio card is not getting detected after reboot. This is a spurious issue happening on Geminilake. HW codec and HD audio controller link was going out of sync for which there was a fix in i915 driver but was not getting invoked for GLK. Extending this fix to GLK as well. Tested by Du,Wenkai on GLK board. Bspec: 21829 v2: Instead of checking GEN9_BC, BXT and GLK macros, use IS_GEN9 macro (Jani N) Cc: # b651bd2a3ae3 ("drm/i915/audio: Fix audio enumeration issue on BXT") Cc: Signed-off-by: Gaurav K Singh Reviewed-by: Abhay Kumar Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1523989338-29677-1-git-send-email-gaurav.k.singh@intel.com (cherry picked from commit 8221229046e862977ae93ec9d34aa583fbd10397) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/intel_audio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 709d6ca68074..3ea566f99450 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev, struct drm_i915_private *dev_priv = kdev_to_i915(kdev); u32 tmp; - if (!IS_GEN9_BC(dev_priv)) + if (!IS_GEN9(dev_priv)) return; i915_audio_component_get_power(kdev); -- cgit From 7eb2c4dd54ff841f2fe509a84973eb25fa20bda2 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 16 Apr 2018 18:53:09 +0300 Subject: drm/i915: Fix LSPCON TMDS output buffer enabling from low-power state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit LSPCON adapters in low-power state may ignore the first I2C write during TMDS output buffer enabling, resulting in a blank screen even with an otherwise enabled pipe. Fix this by reading back and validating the written value a few times. The problem was noticed on GLK machines with an onboard LSPCON adapter after entering/exiting DC5 power state. Doing an I2C read of the adapter ID as the first transaction - instead of the I2C write to enable the TMDS buffers - returns the correct value. 
Based on this we assume that the transaction itself is sent properly, it's only the adapter that is not ready for some reason to accept this first write after waking from low-power state. In my case the second I2C write attempt always succeeded. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105854 Cc: Clinton Taylor Cc: Ville Syrjälä Cc: stable@vger.kernel.org Signed-off-by: Imre Deak Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20180416155309.11100-1-imre.deak@intel.com --- drivers/gpu/drm/drm_dp_dual_mode_helper.c | 39 +++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 02a50929af67..e7f4fe2848a5 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, { uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; ssize_t ret; + int retry; if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) return 0; - ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, - &tmds_oen, sizeof(tmds_oen)); - if (ret) { - DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", - enable ? "enable" : "disable"); - return ret; + /* + * LSPCON adapters in low-power state may ignore the first write, so + * read back and verify the written value a few times. + */ + for (retry = 0; retry < 3; retry++) { + uint8_t tmp; + + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmds_oen, sizeof(tmds_oen)); + if (ret) { + DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n", + enable ? "enable" : "disable", + retry + 1); + return ret; + } + + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmp, sizeof(tmp)); + if (ret) { + DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n", + enable ? "enabling" : "disabling", + retry + 1); + return ret; + } + + if (tmp == tmds_oen) + return 0; } - return 0; + DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n", + enable ? "enabling" : "disabling"); + + return -EIO; } EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); -- cgit From 7407188489c62a7b5694bc75a6db2b82af94c9a5 Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Thu, 19 Apr 2018 14:04:43 +0800 Subject: clocksource/imx-tpm: Correct -ETIME return condition check The additional brakects added to tpm_set_next_event's return value computation causes (int) forced type conversion NOT taking effect, and the incorrect value return will cause various system timer issue, like RCU stall etc.. Remove the additional brackets to make sure tpm_set_next_event always returns correct value. 
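A standalone illustration of the precedence problem (not driver code; counter values are assumed to be 32-bit unsigned, and the counter is assumed to have already passed the programmed event time):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t now = 100, next = 90;	/* 'next' is already in the past */

		/* Buggy form: the cast applies to the 0/1 result of the
		 * comparison, so the wrapped unsigned difference
		 * (0xfffffff6) is never seen as negative and the missed
		 * event is not reported. */
		int buggy = (int)((next - now) <= 0) ? -1 : 0;	/* 0 */

		/* Fixed form: the difference itself is cast first, becomes
		 * -10, and the expired event is detected (-1 standing in
		 * for -ETIME here). */
		int fixed = (int)(next - now) <= 0 ? -1 : 0;	/* -1 */

		printf("buggy=%d fixed=%d\n", buggy, fixed);
		return 0;
	}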
Fixes: 059ab7b82eec ("clocksource/drivers/imx-tpm: Add imx tpm timer support") Signed-off-by: Anson Huang Signed-off-by: Thomas Gleixner Acked-by: Dong Aisheng Cc: stable@vger.kernel.org Cc: daniel.lezcano@linaro.org Cc: Linux-imx@nxp.com Link: https://lkml.kernel.org/r/1524117883-2484-1-git-send-email-Anson.Huang@nxp.com --- drivers/clocksource/timer-imx-tpm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c index 05d97a6871d8..6c8318470b48 100644 --- a/drivers/clocksource/timer-imx-tpm.c +++ b/drivers/clocksource/timer-imx-tpm.c @@ -114,7 +114,7 @@ static int tpm_set_next_event(unsigned long delta, * of writing CNT registers which may cause the min_delta event got * missed, so we need add a ETIME check here in case it happened. */ - return (int)((next - now) <= 0) ? -ETIME : 0; + return (int)(next - now) <= 0 ? -ETIME : 0; } static int tpm_set_state_oneshot(struct clock_event_device *evt) -- cgit From d78fd7255881645fc645e23145d469385227170d Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 12 Apr 2018 16:37:09 -0400 Subject: drm/amd/display: Don't program bypass on linear regamma LUT Even though this is required for degamma since DCE HW only supports a couple predefined LUTs we can just program the LUT directly for regamma. This fixes dark screens which occurs when we program regamma to bypass while degamma is using srgb LUT. Signed-off-by: Harry Wentland Reviewed-by: Leo Li Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index f6cb502c303f..25f064c01038 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -138,13 +138,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc) lut = (struct drm_color_lut *)blob->data; lut_size = blob->length / sizeof(struct drm_color_lut); - if (__is_lut_linear(lut, lut_size)) { - /* Set to bypass if lut is set to linear */ - stream->out_transfer_func->type = TF_TYPE_BYPASS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; - return 0; - } - gamma = dc_create_gamma(); if (!gamma) return -ENOMEM; -- cgit From 84f8508f717268c333de8e472f351d6a7a487e51 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 17 Apr 2018 17:26:26 +0800 Subject: drm/amd/pp: Fix bug voltage can't be OD separately on VI Make sure to update the MCLK and SCLK flags when setting the VDDC flags due to dependencies. 
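Sketch of the MCLK path as changed in the diff below (the SCLK path is handled the same way):

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			/* VDDC was overdriven: the dependent MCLK DPM table
			 * has to be reprogrammed together with the voltage */
			data->need_update_smu7_dpm_table |=
				DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}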
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index add90675fd2a..26fbeafc3c96 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4743,23 +4743,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) for (i=0; i < dep_table->count; i++) { if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; - break; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; + return; } } - if (i == dep_table->count) + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + } dep_table = table_info->vdd_dep_on_sclk; odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); for (i=0; i < dep_table->count; i++) { if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; - break; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; + return; } } - if (i == dep_table->count) + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + } } static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, -- cgit From cc9e992dfb5bb48f59f3fbc1268d3f38d2c86ef3 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Tue, 17 Apr 2018 21:49:51 +0800 Subject: drm/amd/powerplay: header file interface to SMU update update vega12 smu interface. Signed-off-by: Kenneth Feng Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h index fb696e3d06cf..2f8a3b983cce 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h @@ -412,8 +412,10 @@ typedef struct { QuadraticInt_t ReservedEquation2; QuadraticInt_t ReservedEquation3; + uint16_t MinVoltageUlvGfx; + uint16_t MinVoltageUlvSoc; - uint32_t Reserved[15]; + uint32_t Reserved[14]; -- cgit From df3f126482dba8e00cdbfc8fc44a05a5a35b1704 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 16 Apr 2018 11:58:16 -0500 Subject: libnvdimm, of_pmem: use dev_to_node() instead of of_node_to_nid() Remove the direct dependency on of_node_to_nid() by using dev_to_node() instead. Any DT platform device will have its NUMA node id set when the device is created. With this, commit 291717b6fbdb ("libnvdimm, of_pmem: workaround OF_NUMA=n build error") can be reverted. 
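The substitution itself is a one-liner in the region probe path, taken from the diff below:

	/* The platform device created from the device tree already carries
	 * its NUMA node id, so of_pmem can use it directly instead of
	 * calling of_node_to_nid() on the OF node. */
	ndr_desc.numa_node = dev_to_node(&pdev->dev);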
Fixes: 717197608952 ("libnvdimm: Add device-tree based driver") Cc: Dan Williams Cc: Oliver O'Halloran Cc: linux-nvdimm@lists.01.org Signed-off-by: Rob Herring Signed-off-by: Dan Williams --- drivers/nvdimm/of_pmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 85013bad35de..0a701837dfc0 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c @@ -67,7 +67,7 @@ static int of_pmem_region_probe(struct platform_device *pdev) */ memset(&ndr_desc, 0, sizeof(ndr_desc)); ndr_desc.attr_groups = region_attr_groups; - ndr_desc.numa_node = of_node_to_nid(np); + ndr_desc.numa_node = dev_to_node(&pdev->dev); ndr_desc.res = &pdev->resource[i]; ndr_desc.of_node = np; set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); -- cgit From f22acf82746dd3d75e06d7c5801926d3b9c50169 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 19 Apr 2018 15:07:42 -0700 Subject: Revert "libnvdimm, of_pmem: workaround OF_NUMA=n build error" With commit df3f126482db ("libnvdimm, of_pmem: use dev_to_node() instead of of_node_to_nid()") it is now possible to allow of_pmem to be built as a module as originally implemented. Signed-off-by: Dan Williams --- drivers/nvdimm/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig index 85997184e047..9d36473dc2a2 100644 --- a/drivers/nvdimm/Kconfig +++ b/drivers/nvdimm/Kconfig @@ -103,8 +103,7 @@ config NVDIMM_DAX Select Y if unsure config OF_PMEM - # FIXME: make tristate once OF_NUMA dependency removed - bool "Device-tree support for persistent memory regions" + tristate "Device-tree support for persistent memory regions" depends on OF default LIBNVDIMM help -- cgit From ef8423022324cf79bd1b41d8707c766461e7e555 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Thu, 19 Apr 2018 13:39:43 -0700 Subject: device-dax: allow MAP_SYNC to succeed MAP_SYNC is a nop for device-dax. Allow MAP_SYNC to succeed on device-dax to eliminate special casing between device-dax and fs-dax as to when the flag can be specified. Device-dax users already implicitly assume that they do not need to call fsync(), and this enables them to explicitly check for this capability. Cc: Fixes: b6fb293f2497 ("mm: Define MAP_SYNC and VM_SYNC flags") Signed-off-by: Dave Jiang Reviewed-by: Dan Williams Signed-off-by: Dan Williams --- drivers/dax/device.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/dax/device.c b/drivers/dax/device.c index be8606457f27..aff2c1594220 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "dax-private.h" #include "dax.h" @@ -540,6 +541,7 @@ static const struct file_operations dax_fops = { .release = dax_release, .get_unmapped_area = dax_get_unmapped_area, .mmap = dax_mmap, + .mmap_supported_flags = MAP_SYNC, }; static void dev_dax_release(struct device *dev) -- cgit From c5157b76869ba98c3a99a1982396437464e131a6 Mon Sep 17 00:00:00 2001 From: Ioan Nicu Date: Fri, 20 Apr 2018 14:55:49 -0700 Subject: rapidio: fix rio_dma_transfer error handling Some of the mport_dma_req structure members were initialized late inside the do_dma_request() function, just before submitting the request to the dma engine. But we have some error branches before that. 
In case of such an error, the code would return on the error path and trigger the calling of dma_req_free() with a req structure which is not completely initialized. This causes a NULL pointer dereference in dma_req_free(). This patch fixes these error branches by making sure that all necessary mport_dma_req structure members are initialized in rio_dma_transfer() immediately after the request structure gets allocated. Link: http://lkml.kernel.org/r/20180412150605.GA31409@nokia.com Fixes: bbd876adb8c72 ("rapidio: use a reference count for struct mport_dma_req") Signed-off-by: Ioan Nicu Tested-by: Alexander Sverdlin Acked-by: Alexandre Bounine Cc: Barry Wood Cc: Matt Porter Cc: Christophe JAILLET Cc: Logan Gunthorpe Cc: Chris Wilson Cc: Tvrtko Ursulin Cc: Frank Kunz Cc: [4.6+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/rapidio/devices/rio_mport_cdev.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 9d27016c899e..0434ab7b6497 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req, tx->callback = dma_xfer_callback; tx->callback_param = req; - req->dmach = chan; - req->sync = sync; req->status = DMA_IN_PROGRESS; - init_completion(&req->req_comp); kref_get(&req->refcount); cookie = dmaengine_submit(tx); @@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, if (!req) return -ENOMEM; - kref_init(&req->refcount); - ret = get_dma_channel(priv); if (ret) { kfree(req); return ret; } + chan = priv->dmach; + + kref_init(&req->refcount); + init_completion(&req->req_comp); + req->dir = dir; + req->filp = filp; + req->priv = priv; + req->dmach = chan; + req->sync = sync; /* * If parameter loc_addr != NULL, we are transferring data from/to @@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, xfer->offset, xfer->length); } - req->dir = dir; - req->filp = filp; - req->priv = priv; - chan = priv->dmach; - nents = dma_map_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir); if (nents == 0) { -- cgit From 686c97ee29c886ee07d17987d0059874c5c3b5af Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Apr 2018 12:52:06 +0200 Subject: s390/qeth: fix error handling in adapter command callbacks Make sure to check both return code fields before(!) processing the command response. Otherwise we risk operating on invalid data. This matches an earlier fix for SETASSPARMS commands, see commit ad3cbf613329 ("s390/qeth: fix error handling in checksum cmd callback"). Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 85 +++++++++++++++++---------------------- 1 file changed, 37 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 04fefa5bb08d..36bc94088de1 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3033,28 +3033,23 @@ static int qeth_send_startlan(struct qeth_card *card) return rc; } -static int qeth_default_setadapterparms_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long data) +static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) { - struct qeth_ipa_cmd *cmd; - - QETH_CARD_TEXT(card, 4, "defadpcb"); - - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code == 0) + if (!cmd->hdr.return_code) cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; - return 0; + return cmd->hdr.return_code; } static int qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; QETH_CARD_TEXT(card, 3, "quyadpcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - cmd = (struct qeth_ipa_cmd *) data; if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { card->info.link_type = cmd->data.setadapterparms.data.query_cmds_supp.lan_type; @@ -3062,7 +3057,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, } card->options.adp.supported_funcs = cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; - return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); + return 0; } static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, @@ -3154,22 +3149,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists); static int qeth_query_switch_attributes_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; - struct qeth_switch_info *sw_info; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_query_switch_attributes *attrs; + struct qeth_switch_info *sw_info; QETH_CARD_TEXT(card, 2, "qswiatcb"); - cmd = (struct qeth_ipa_cmd *) data; - sw_info = (struct qeth_switch_info *)reply->param; - if (cmd->data.setadapterparms.hdr.return_code == 0) { - attrs = &cmd->data.setadapterparms.data.query_switch_attributes; - sw_info->capabilities = attrs->capabilities; - sw_info->settings = attrs->settings; - QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, - sw_info->settings); - } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; + sw_info = (struct qeth_switch_info *)reply->param; + attrs = &cmd->data.setadapterparms.data.query_switch_attributes; + sw_info->capabilities = attrs->capabilities; + sw_info->settings = attrs->settings; + QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, + sw_info->settings); return 0; } @@ -4207,16 +4200,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet); static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_ipacmd_setadpparms *setparms; QETH_CARD_TEXT(card, 4, "prmadpcb"); - cmd = (struct qeth_ipa_cmd *) data; setparms = &(cmd->data.setadapterparms); - - qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); - if (cmd->hdr.return_code) { + if (qeth_setadpparms_inspect_rc(cmd)) { 
QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); setparms->data.mode = SET_PROMISC_MODE_OFF; } @@ -4286,18 +4276,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats); static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; QETH_CARD_TEXT(card, 4, "chgmaccb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - cmd = (struct qeth_ipa_cmd *) data; if (!card->options.layer2 || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { ether_addr_copy(card->dev->dev_addr, cmd->data.setadapterparms.data.change_addr.addr); card->info.mac_bits |= QETH_LAYER2_MAC_READ; } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); return 0; } @@ -4328,13 +4318,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_set_access_ctrl *access_ctrl_req; int fallback = *(int *)reply->param; QETH_CARD_TEXT(card, 4, "setaccb"); + if (cmd->hdr.return_code) + return 0; + qeth_setadpparms_inspect_rc(cmd); - cmd = (struct qeth_ipa_cmd *) data; access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; QETH_DBF_TEXT_(SETUP, 2, "setaccb"); QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); @@ -4407,7 +4399,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, card->options.isolation = card->options.prev_isolation; break; } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); return 0; } @@ -4695,14 +4686,15 @@ out: static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; struct qeth_qoat_priv *priv; char *resdata; int resdatalen; QETH_CARD_TEXT(card, 3, "qoatcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - cmd = (struct qeth_ipa_cmd *)data; priv = (struct qeth_qoat_priv *)reply->param; resdatalen = cmd->data.setadapterparms.hdr.cmdlength; resdata = (char *)data + 28; @@ -4796,21 +4788,18 @@ out: static int qeth_query_card_info_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct carrier_info *carrier_info = (struct carrier_info *)reply->param; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; struct qeth_query_card_info *card_info; - struct carrier_info *carrier_info; QETH_CARD_TEXT(card, 2, "qcrdincb"); - carrier_info = (struct carrier_info *)reply->param; - cmd = (struct qeth_ipa_cmd *)data; - card_info = &cmd->data.setadapterparms.data.card_info; - if (cmd->data.setadapterparms.hdr.return_code == 0) { - carrier_info->card_type = card_info->card_type; - carrier_info->port_mode = card_info->port_mode; - carrier_info->port_speed = card_info->port_speed; - } + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); + card_info = &cmd->data.setadapterparms.data.card_info; + carrier_info->card_type = card_info->card_type; + carrier_info->port_mode = card_info->port_mode; + carrier_info->port_speed = card_info->port_speed; return 0; } -- cgit From 901e3f49facbd31b2b3d1786637b4a35e1022e9b Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Apr 2018 12:52:07 +0200 Subject: s390/qeth: avoid control IO completion stalls 
For control IO, qeth currently tracks the index of the buffer that it expects to complete the next IO on each qeth_channel. If the channel presents an IRQ while this buffer has not yet completed, no completion processing for _any_ completed buffer takes place. So if the 'next buffer' is skipped for any sort of reason* (eg. when it is released due to error conditions, before the IO is started), the buffer obviously won't switch to PROCESSED until it is eventually allocated for a _different_ IO and completes. Until this happens, all completion processing on that channel stalls and pending requests possibly time out. As a fix, remove the whole 'next buffer' logic and simply process any IO buffer right when it completes. A channel will never have more than one IO pending, so there's no risk of processing out-of-sequence. *Note: currently just one location in the code really handles this problem, by advancing the 'next' index manually. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 2 -- drivers/s390/net/qeth_core_main.c | 22 +++++----------------- 2 files changed, 5 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 4326715dc13e..78b98b3e7efa 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -557,7 +557,6 @@ enum qeth_prot_versions { enum qeth_cmd_buffer_state { BUF_STATE_FREE, BUF_STATE_LOCKED, - BUF_STATE_PROCESSED, }; enum qeth_cq { @@ -601,7 +600,6 @@ struct qeth_channel { struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO]; atomic_t irq_pending; int io_buf_no; - int buf_no; }; /** diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 36bc94088de1..5ec47c6ebaa6 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -818,7 +818,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel) for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) qeth_release_buffer(channel, &channel->iob[cnt]); - channel->buf_no = 0; channel->io_buf_no = 0; } EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); @@ -924,7 +923,6 @@ static int qeth_setup_channel(struct qeth_channel *channel) kfree(channel->iob[cnt].data); return -ENOMEM; } - channel->buf_no = 0; channel->io_buf_no = 0; atomic_set(&channel->irq_pending, 0); spin_lock_init(&channel->iob_lock); @@ -1100,11 +1098,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, { int rc; int cstat, dstat; - struct qeth_cmd_buffer *buffer; struct qeth_channel *channel; struct qeth_card *card; struct qeth_cmd_buffer *iob; - __u8 index; if (__qeth_check_irb_error(cdev, intparm, irb)) return; @@ -1182,25 +1178,18 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, channel->state = CH_STATE_RCD_DONE; goto out; } - if (intparm) { - buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm); - buffer->state = BUF_STATE_PROCESSED; - } if (channel == &card->data) return; if (channel == &card->read && channel->state == CH_STATE_UP) __qeth_issue_next_read(card); - iob = channel->iob; - index = channel->buf_no; - while (iob[index].state == BUF_STATE_PROCESSED) { - if (iob[index].callback != NULL) - iob[index].callback(channel, iob + index); - - index = (index + 1) % QETH_CMD_BUFFER_NO; + if (intparm) { + iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); + if (iob->callback) + iob->callback(iob->channel, iob); } - channel->buf_no = index; + out: wake_up(&card->wait_q); return; @@ -2214,7 +2203,6 @@ time_err: error: 
atomic_set(&card->write.irq_pending, 0); qeth_release_buffer(iob->channel, iob); - card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; rc = reply->rc; qeth_put_reply(reply); return rc; -- cgit From a936b1ef37ce1e996533878f4b23944f9444dcdf Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Apr 2018 12:52:08 +0200 Subject: s390/qeth: handle failure on workqueue creation Creating the global workqueue during driver init may fail, deal with it. Also, destroy the created workqueue on any subsequent error. Fixes: 0f54761d167f ("qeth: Support VEPA mode") Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 5ec47c6ebaa6..9a08b545d018 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -6540,10 +6540,14 @@ static int __init qeth_core_init(void) mutex_init(&qeth_mod_mutex); qeth_wq = create_singlethread_workqueue("qeth_wq"); + if (!qeth_wq) { + rc = -ENOMEM; + goto out_err; + } rc = qeth_register_dbf_views(); if (rc) - goto out_err; + goto dbf_err; qeth_core_root_dev = root_device_register("qeth"); rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); if (rc) @@ -6580,6 +6584,8 @@ slab_err: root_device_unregister(qeth_core_root_dev); register_err: qeth_unregister_dbf_views(); +dbf_err: + destroy_workqueue(qeth_wq); out_err: pr_err("Initializing the qeth device driver failed\n"); return rc; -- cgit From bcacfcbc82b4235d280ed9b067aa4567f4a0c756 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Apr 2018 12:52:09 +0200 Subject: s390/qeth: fix MAC address update sequence When changing the MAC address on a L2 qeth device, current code first unregisters the old address, then registers the new one. If HW rejects the new address (or the IO fails), the device ends up with no operable address at all. Re-order the code flow so that the old address only gets dropped if the new address was registered successfully. While at it, add logic to catch some corner-cases. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_l2_main.c | 55 +++++++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 2ad6f12f3d49..36f9b74848fe 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -121,13 +121,10 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) QETH_CARD_TEXT(card, 2, "L2Setmac"); rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); if (rc == 0) { - card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - ether_addr_copy(card->dev->dev_addr, mac); dev_info(&card->gdev->dev, - "MAC address %pM successfully registered on device %s\n", - card->dev->dev_addr, card->dev->name); + "MAC address %pM successfully registered on device %s\n", + mac, card->dev->name); } else { - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; switch (rc) { case -EEXIST: dev_warn(&card->gdev->dev, @@ -142,19 +139,6 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) return rc; } -static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) -{ - int rc; - - QETH_CARD_TEXT(card, 2, "L2Delmac"); - if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) - return 0; - rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC); - if (rc == 0) - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - return rc; -} - static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) { enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? @@ -519,6 +503,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) { struct sockaddr *addr = p; struct qeth_card *card = dev->ml_priv; + u8 old_addr[ETH_ALEN]; int rc = 0; QETH_CARD_TEXT(card, 3, "setmac"); @@ -530,14 +515,35 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return -EOPNOTSUPP; } QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { QETH_CARD_TEXT(card, 3, "setmcREC"); return -ERESTARTSYS; } - rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); - if (!rc || (rc == -ENOENT)) - rc = qeth_l2_send_setmac(card, addr->sa_data); - return rc ? 
-EINVAL : 0; + + if (!qeth_card_hw_is_reachable(card)) { + ether_addr_copy(dev->dev_addr, addr->sa_data); + return 0; + } + + /* don't register the same address twice */ + if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && + (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) + return 0; + + /* add the new address, switch over, drop the old */ + rc = qeth_l2_send_setmac(card, addr->sa_data); + if (rc) + return rc; + ether_addr_copy(old_addr, dev->dev_addr); + ether_addr_copy(dev->dev_addr, addr->sa_data); + + if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) + qeth_l2_remove_mac(card, old_addr); + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; + return 0; } static void qeth_promisc_to_bridge(struct qeth_card *card) @@ -1067,8 +1073,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) goto out_remove; } - if (card->info.type != QETH_CARD_TYPE_OSN) - qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); + if (card->info.type != QETH_CARD_TYPE_OSN && + !qeth_l2_send_setmac(card, card->dev->dev_addr)) + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { if (card->info.hwtrap && -- cgit From db71bbbd11a4d314f0fa3fbf3369b71cf33ce33c Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Apr 2018 12:52:10 +0200 Subject: s390/qeth: fix request-side race during cmd IO timeout Submitting a cmd IO request (usually on the WRITE device, but for IDX also on the READ device) is currently done with ccw_device_start() and a manual timeout in the caller. On timeout, the caller cleans up the related resources (eg. IO buffer). But 1) the IO might still be active and utilize those resources, and 2) when the IO completes, qeth_irq() will attempt to clean up the same resources again. Instead of introducing additional resource locking, switch to ccw_device_start_timeout() to ensure IO termination after timeout, and let the IRQ handler alone deal with cleaning up after a request. This also removes a stray write->irq_pending reset from clear_ipacmd_list(). The routine doesn't terminate any pending IO on the WRITE device, so this should be handled properly via IO timeout in the IRQ handler. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 51 ++++++++++++++++++++------------------- drivers/s390/net/qeth_core_mpc.h | 12 +++++++++ drivers/s390/net/qeth_l2_main.c | 4 +-- 3 files changed, 40 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9a08b545d018..9b22d5d496ae 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) qeth_put_reply(reply); } spin_unlock_irqrestore(&card->lock, flags); - atomic_set(&card->write.irq_pending, 0); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); @@ -1098,14 +1097,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, { int rc; int cstat, dstat; + struct qeth_cmd_buffer *iob = NULL; struct qeth_channel *channel; struct qeth_card *card; - struct qeth_cmd_buffer *iob; - - if (__qeth_check_irb_error(cdev, intparm, irb)) - return; - cstat = irb->scsw.cmd.cstat; - dstat = irb->scsw.cmd.dstat; card = CARD_FROM_CDEV(cdev); if (!card) @@ -1123,6 +1117,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, channel = &card->data; QETH_CARD_TEXT(card, 5, "data"); } + + if (qeth_intparm_is_iob(intparm)) + iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); + + if (__qeth_check_irb_error(cdev, intparm, irb)) { + /* IO was terminated, free its resources. */ + if (iob) + qeth_release_buffer(iob->channel, iob); + atomic_set(&channel->irq_pending, 0); + wake_up(&card->wait_q); + return; + } + atomic_set(&channel->irq_pending, 0); if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) @@ -1146,6 +1153,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, /* we don't have to handle this further */ intparm = 0; } + + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + if ((dstat & DEV_STAT_UNIT_EXCEP) || (dstat & DEV_STAT_UNIT_CHECK) || (cstat)) { @@ -1184,11 +1195,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, channel->state == CH_STATE_UP) __qeth_issue_next_read(card); - if (intparm) { - iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); - if (iob->callback) - iob->callback(iob->channel, iob); - } + if (iob && iob->callback) + iob->callback(iob->channel, iob); out: wake_up(&card->wait_q); @@ -1859,8 +1867,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start(channel->ccwdev, - &channel->ccw, (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + (addr_t) iob, 0, 0, QETH_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { @@ -1877,7 +1885,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, if (channel->state != CH_STATE_UP) { rc = -ETIME; QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); - qeth_clear_cmd_buffers(channel); } else rc = 0; return rc; @@ -1931,8 +1938,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start(channel->ccwdev, - &channel->ccw, (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + (addr_t) iob, 0, 0, QETH_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), 
flags); if (rc) { @@ -1953,7 +1960,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", dev_name(&channel->ccwdev->dev)); QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); - qeth_clear_cmd_buffers(channel); return -ETIME; } return qeth_idx_activate_get_answer(channel, idx_reply_cb); @@ -2155,8 +2161,8 @@ int qeth_send_control_data(struct qeth_card *card, int len, QETH_CARD_TEXT(card, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); - rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, - (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw, + (addr_t) iob, 0, 0, event_timeout); spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " @@ -2188,8 +2194,6 @@ int qeth_send_control_data(struct qeth_card *card, int len, } } - if (reply->rc == -EIO) - goto error; rc = reply->rc; qeth_put_reply(reply); return rc; @@ -2200,9 +2204,6 @@ time_err: list_del_init(&reply->list); spin_unlock_irqrestore(&reply->card->lock, flags); atomic_inc(&reply->received); -error: - atomic_set(&card->write.irq_pending, 0); - qeth_release_buffer(iob->channel, iob); rc = reply->rc; qeth_put_reply(reply); return rc; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 619f897b4bb0..f4d1ec0b8f5a 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[]; #define QETH_HALT_CHANNEL_PARM -11 #define QETH_RCD_PARM -12 +static inline bool qeth_intparm_is_iob(unsigned long intparm) +{ + switch (intparm) { + case QETH_CLEAR_CHANNEL_PARM: + case QETH_HALT_CHANNEL_PARM: + case QETH_RCD_PARM: + case 0: + return false; + } + return true; +} + /*****************************************************************************/ /* IP Assist related definitions */ /*****************************************************************************/ diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 36f9b74848fe..b8079f2a65b3 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1345,8 +1345,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, qeth_prepare_control_data(card, len, iob); QETH_CARD_TEXT(card, 6, "osnoirqp"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); - rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, - (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw, + (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " -- cgit From b7493e91c11a757cf0f8ab26989642ee4bb2c642 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Apr 2018 12:52:11 +0200 Subject: s390/qeth: use Read device to query hypervisor for MAC For z/VM NICs, qeth needs to consider which of the three CCW devices in an MPC group it uses for requesting a managed MAC address. On the Base device, the hypervisor returns a default MAC which is pre-assigned when creating the NIC (this MAC is also returned by the READ MAC primitive). Querying any other device results in the allocation of an additional MAC address. For consistency with READ MAC and to avoid using up more addresses than necessary, it is preferable to use the NIC's default MAC. 
So switch the the diag26c over to using a NIC's Read device, which should always be identical to the Base device. Fixes: ec61bd2fd2a2 ("s390/qeth: use diag26c to get MAC address on L2") Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9b22d5d496ae..dffd820731f2 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4835,7 +4835,7 @@ int qeth_vm_request_mac(struct qeth_card *card) goto out; } - ccw_device_get_id(CARD_DDEV(card), &id); + ccw_device_get_id(CARD_RDEV(card), &id); request->resp_buf_len = sizeof(*response); request->resp_version = DIAG26C_VERSION2; request->op_code = DIAG26C_GET_MAC; -- cgit From 5e391dc5a8d801a2410d0032ad4a428d1d61800c Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Thu, 19 Apr 2018 22:49:09 +0300 Subject: net: ethernet: ti: cpsw: fix tx vlan priority mapping The CPDMA_TX_PRIORITY_MAP in real is vlan pcp field priority mapping register and basically replaces vlan pcp field for tagged packets. So, set it to be 1:1 mapping. Otherwise, it will cause unexpected change of egress vlan tagged packets, like prio 2 -> prio 5. Fixes: e05107e6b747 ("net: ethernet: ti: cpsw: add multi queue support") Reviewed-by: Grygorii Strashko Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 30371274409d..74f828412055 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -129,7 +129,7 @@ do { \ #define RX_PRIORITY_MAPPING 0x76543210 #define TX_PRIORITY_MAPPING 0x33221100 -#define CPDMA_TX_PRIORITY_MAP 0x01234567 +#define CPDMA_TX_PRIORITY_MAP 0x76543210 #define CPSW_VLAN_AWARE BIT(1) #define CPSW_RX_VLAN_ENCAP BIT(2) -- cgit From 660e309ddd6aa99bb4d2a859c4a0b56965e744ef Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Fri, 20 Apr 2018 14:25:32 -0500 Subject: ibmvnic: Clean actual number of RX or TX pools Avoid using value stored in the login response buffer when cleaning TX and RX buffer pools since these could be inconsistent depending on the device state. Instead use the field in the driver's private data that tracks the number of active pools. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 2df01ad98df7..6e8d6a6f6aaf 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1128,7 +1128,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter) if (!adapter->rx_pool) return; - rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + rx_scrqs = adapter->num_active_rx_pools; rx_entries = adapter->req_rx_add_entries_per_subcrq; /* Free any remaining skbs in the rx buffer pools */ @@ -1177,7 +1177,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter) if (!adapter->tx_pool || !adapter->tso_pool) return; - tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + tx_scrqs = adapter->num_active_tx_pools; /* Free any remaining skbs in the tx buffer pools */ for (i = 0; i < tx_scrqs; i++) { -- cgit From ddea788c63094f7c483783265563dd5b50052e28 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 22 Apr 2018 19:11:50 +0800 Subject: bonding: do not set slave_dev npinfo before slave_enable_netpoll in bond_enslave After Commit 8a8efa22f51b ("bonding: sync netpoll code with bridge"), it would set slave_dev npinfo in slave_enable_netpoll when enslaving a dev if bond->dev->npinfo was set. However now slave_dev npinfo is set with bond->dev->npinfo before calling slave_enable_netpoll. With slave_dev npinfo set, __netpoll_setup called in slave_enable_netpoll will not call slave dev's .ndo_netpoll_setup(). It causes that the lower dev of this slave dev can't set its npinfo. One way to reproduce it: # modprobe bonding # brctl addbr br0 # brctl addif br0 eth1 # ifconfig bond0 192.168.122.1/24 up # ifenslave bond0 eth2 # systemctl restart netconsole # ifenslave bond0 br0 # ifconfig eth2 down # systemctl restart netconsole The netpoll won't really work. This patch is to remove that slave_dev npinfo setting in bond_enslave(). Fixes: 8a8efa22f51b ("bonding: sync netpoll code with bridge") Signed-off-by: Xin Long Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b7b113018853..718e4914e3a0 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, } /* switch(bond_mode) */ #ifdef CONFIG_NET_POLL_CONTROLLER - slave_dev->npinfo = bond->dev->npinfo; - if (slave_dev->npinfo) { + if (bond->dev->npinfo) { if (slave_enable_netpoll(new_slave)) { netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n"); res = -EBUSY; -- cgit From b6a930fa88083b41d26ddf1cab95cbd740936c22 Mon Sep 17 00:00:00 2001 From: Jingju Hou Date: Mon, 23 Apr 2018 15:22:49 +0800 Subject: net: phy: marvell: clear wol event before setting it If WOL event happened once, the LED[2] interrupt pin will not be cleared unless we read the CSISR register. If interrupts are in use, the normal interrupt handling will clear the WOL event. Let's clear the WOL event before enabling it if !phy_interrupt_is_valid(). Signed-off-by: Jingju Hou Signed-off-by: Jisheng Zhang Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index c22e8e383247..25e2a099b71c 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1393,6 +1393,15 @@ static int m88e1318_set_wol(struct phy_device *phydev, if (err < 0) goto error; + /* If WOL event happened once, the LED[2] interrupt pin + * will not be cleared unless we reading the interrupt status + * register. If interrupts are in use, the normal interrupt + * handling will clear the WOL event. Clear the WOL event + * before enabling it if !phy_interrupt_is_valid() + */ + if (!phy_interrupt_is_valid(phydev)) + phy_read(phydev, MII_M1011_IEVENT); + /* Enable the WOL interrupt */ err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, MII_88E1318S_PHY_CSIER_WOL_EIE); -- cgit From a49e2f5d5fb141884452ddb428f551b123d436b5 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 23 Apr 2018 16:38:27 +0200 Subject: pppoe: check sockaddr length in pppoe_connect() We must validate sockaddr_len, otherwise userspace can pass fewer data than we expect and we end up accessing invalid data. Fixes: 224cf5ad14c0 ("ppp: Move the PPP drivers") Reported-by: syzbot+4f03bdf92fdf9ef5ddab@syzkaller.appspotmail.com Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- drivers/net/ppp/pppoe.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 1483bc7b01e1..7df07337d69c 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); error = -EINVAL; + + if (sockaddr_len != sizeof(struct sockaddr_pppox)) + goto end; + if (sp->sa_protocol != PX_PROTO_OE) goto end; -- cgit From 4d945663a6a0acf3cbe45940503f2eb9584bfee7 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 23 Apr 2018 11:43:08 -0500 Subject: amd-xgbe: Add pre/post auto-negotiation phy hooks Add hooks to the driver auto-negotiation (AN) flow to allow the different phy implementations to perform any steps necessary to improve AN. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 16 ++++++++++++++-- drivers/net/ethernet/amd/xgbe/xgbe.h | 5 +++++ 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 072b9f664597..e3d361e242aa 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -437,6 +437,9 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata) static void xgbe_an_restart(struct xgbe_prv_data *pdata) { + if (pdata->phy_if.phy_impl.an_pre) + pdata->phy_if.phy_impl.an_pre(pdata); + switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: @@ -453,6 +456,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata) static void xgbe_an_disable(struct xgbe_prv_data *pdata) { + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: @@ -637,11 +643,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) return XGBE_AN_NO_LINK; } - xgbe_an73_disable(pdata); + xgbe_an_disable(pdata); xgbe_switch_mode(pdata); - xgbe_an73_restart(pdata); + xgbe_an_restart(pdata); return XGBE_AN_INCOMPAT_LINK; } @@ -820,6 +826,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata) pdata->an_result = pdata->an_state; pdata->an_state = XGBE_AN_READY; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } @@ -903,6 +912,9 @@ again: pdata->kx_state = XGBE_RX_BPA; pdata->an_start = 0; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ad102c8bac7b..fa0b51ea1b95 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -833,6 +833,7 @@ struct xgbe_hw_if { /* This structure represents implementation specific routines for an * implementation of a PHY. All routines are required unless noted below. * Optional routines: + * an_pre, an_post * kr_training_pre, kr_training_post */ struct xgbe_phy_impl_if { @@ -875,6 +876,10 @@ struct xgbe_phy_impl_if { /* Process results of auto-negotiation */ enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *); + /* Pre/Post auto-negotiation support */ + void (*an_pre)(struct xgbe_prv_data *); + void (*an_post)(struct xgbe_prv_data *); + /* Pre/Post KR training enablement support */ void (*kr_training_pre)(struct xgbe_prv_data *); void (*kr_training_post)(struct xgbe_prv_data *); -- cgit From 96f4d430c507ed4856048c2dc9c1a2ea5b5e74e4 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 23 Apr 2018 11:43:17 -0500 Subject: amd-xgbe: Improve KR auto-negotiation and training Update xgbe-phy-v2.c to make use of the auto-negotiation (AN) phy hooks to improve the ability to successfully complete Clause 73 AN when running at 10gbps. Hardware can sometimes have issues with CDR lock when the AN DME page exchange is being performed. The AN and KR training hooks are used as follows: - The pre AN hook is used to disable CDR tracking in the PHY so that the DME page exchange can be successfully and consistently completed. 
- The post KR training hook is used to re-enable the CDR tracking so that KR training can successfully complete. - The post AN hook is used to check for an unsuccessful AN which will increase a CDR tracking enablement delay (up to a maximum value). Add two debugfs entries to allow control over use of the CDR tracking workaround. The debugfs entries allow the CDR tracking workaround to be disabled and determine whether to re-enable CDR tracking before or after link training has been initiated. Also, with these changes the receiver reset cycle that is performed during the link status check can be performed less often. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 8 ++ drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 16 ++++ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1 + drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 8 +- drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 2 + drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 125 ++++++++++++++++++++++++++- drivers/net/ethernet/amd/xgbe/xgbe.h | 4 + 7 files changed, 160 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 7ea72ef11a55..d272dc6984ac 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1321,6 +1321,10 @@ #define MDIO_VEND2_AN_STAT 0x8002 #endif +#ifndef MDIO_VEND2_PMA_CDR_CONTROL +#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056 +#endif + #ifndef MDIO_CTRL1_SPEED1G #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) #endif @@ -1369,6 +1373,10 @@ #define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 #define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100 +#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01 +#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00 +#define XGBE_PMA_CDR_TRACK_EN_ON 0x01 + /* Bit setting and getting macros * The get macro will extract the current bit field value from within * the variable diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 7d128be61310..b91143947ed2 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata) "debugfs_create_file failed\n"); } + if (pdata->vdata->an_cdr_workaround) { + pfile = debugfs_create_bool("an_cdr_workaround", 0600, + pdata->xgbe_debugfs, + &pdata->debugfs_an_cdr_workaround); + if (!pfile) + netdev_err(pdata->netdev, + "debugfs_create_bool failed\n"); + + pfile = debugfs_create_bool("an_cdr_track_early", 0600, + pdata->xgbe_debugfs, + &pdata->debugfs_an_cdr_track_early); + if (!pfile) + netdev_err(pdata->netdev, + "debugfs_create_bool failed\n"); + } + kfree(buf); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 795e556d4a3f..441d0973957b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); /* Call MDIO/PHY initialization routine */ + pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround; ret = pdata->phy_if.phy_init(pdata); if (ret) return ret; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index e3d361e242aa..1b45cd73a258 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c 
@@ -432,6 +432,8 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata) xgbe_an73_set(pdata, false, false); xgbe_an73_disable_interrupts(pdata); + pdata->an_start = 0; + netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n"); } @@ -511,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata, XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); - if (pdata->phy_if.phy_impl.kr_training_post) - pdata->phy_if.phy_impl.kr_training_post(pdata); - netif_dbg(pdata, link, pdata->netdev, "KR training initiated\n"); + + if (pdata->phy_if.phy_impl.kr_training_post) + pdata->phy_if.phy_impl.kr_training_post(pdata); } return XGBE_AN_PAGE_RECEIVED; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index eb23f9ba1a9a..82d1f416ee2a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = { .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, + .an_cdr_workaround = 1, }; static const struct xgbe_version_data xgbe_v2b = { @@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = { .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, + .an_cdr_workaround = 1, }; static const struct pci_device_id xgbe_pci_table[] = { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 3304a291aa96..b48efc04c4da 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -147,6 +147,14 @@ /* Rate-change complete wait/retry count */ #define XGBE_RATECHANGE_COUNT 500 +/* CDR delay values for KR support (in usec) */ +#define XGBE_CDR_DELAY_INIT 10000 +#define XGBE_CDR_DELAY_INC 10000 +#define XGBE_CDR_DELAY_MAX 100000 + +/* RRC frequency during link status check */ +#define XGBE_RRC_FREQUENCY 10 + enum xgbe_port_mode { XGBE_PORT_MODE_RSVD = 0, XGBE_PORT_MODE_BACKPLANE, @@ -355,6 +363,10 @@ struct xgbe_phy_data { unsigned int redrv_addr; unsigned int redrv_lane; unsigned int redrv_model; + + /* KR AN support */ + unsigned int phy_cdr_notrack; + unsigned int phy_cdr_delay; }; /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */ @@ -2361,7 +2373,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) return 1; /* No link, attempt a receiver reset cycle */ - if (phy_data->rrc_count++) { + if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { phy_data->rrc_count = 0; xgbe_phy_rrc(pdata); } @@ -2669,6 +2681,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) return true; } +static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->debugfs_an_cdr_workaround) + return; + + if (!phy_data->phy_cdr_notrack) + return; + + usleep_range(phy_data->phy_cdr_delay, + phy_data->phy_cdr_delay + 500); + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + XGBE_PMA_CDR_TRACK_EN_MASK, + XGBE_PMA_CDR_TRACK_EN_ON); + + phy_data->phy_cdr_notrack = 0; +} + +static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->debugfs_an_cdr_workaround) + return; + + if (phy_data->phy_cdr_notrack) + return; + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + XGBE_PMA_CDR_TRACK_EN_MASK, + XGBE_PMA_CDR_TRACK_EN_OFF); + + xgbe_phy_rrc(pdata); + + 
phy_data->phy_cdr_notrack = 1; +} + +static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata) +{ + if (!pdata->debugfs_an_cdr_track_early) + xgbe_phy_cdr_track(pdata); +} + +static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata) +{ + if (pdata->debugfs_an_cdr_track_early) + xgbe_phy_cdr_track(pdata); +} + +static void xgbe_phy_an_post(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != XGBE_MODE_KR) + break; + + xgbe_phy_cdr_track(pdata); + + switch (pdata->an_result) { + case XGBE_AN_READY: + case XGBE_AN_COMPLETE: + break; + default: + if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX) + phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC; + else + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; + break; + } + break; + default: + break; + } +} + +static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != XGBE_MODE_KR) + break; + + xgbe_phy_cdr_notrack(pdata); + break; + default: + break; + } +} + static void xgbe_phy_stop(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -2680,6 +2789,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata) xgbe_phy_sfp_reset(phy_data); xgbe_phy_sfp_mod_absent(pdata); + /* Reset CDR support */ + xgbe_phy_cdr_track(pdata); + /* Power off the PHY */ xgbe_phy_power_off(pdata); @@ -2712,6 +2824,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata) /* Start in highest supported mode */ xgbe_phy_set_mode(pdata, phy_data->start_mode); + /* Reset CDR support */ + xgbe_phy_cdr_track(pdata); + /* After starting the I2C controller, we can check for an SFP */ switch (phy_data->port_mode) { case XGBE_PORT_MODE_SFP: @@ -3019,6 +3134,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) } } + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; + /* Register for driving external PHYs */ mii = devm_mdiobus_alloc(pdata->dev); if (!mii) { @@ -3071,4 +3188,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if) phy_impl->an_advertising = xgbe_phy_an_advertising; phy_impl->an_outcome = xgbe_phy_an_outcome; + + phy_impl->an_pre = xgbe_phy_an_pre; + phy_impl->an_post = xgbe_phy_an_post; + + phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; + phy_impl->kr_training_post = xgbe_phy_kr_training_post; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index fa0b51ea1b95..95d4b56448c6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -994,6 +994,7 @@ struct xgbe_version_data { unsigned int irq_reissue_support; unsigned int tx_desc_prefetch; unsigned int rx_desc_prefetch; + unsigned int an_cdr_workaround; }; struct xgbe_vxlan_data { @@ -1262,6 +1263,9 @@ struct xgbe_prv_data { unsigned int debugfs_xprop_reg; unsigned int debugfs_xi2c_reg; + + bool debugfs_an_cdr_workaround; + bool debugfs_an_cdr_track_early; }; /* Function prototypes*/ -- cgit From 117df655f8ed51adb6e6b163812a06ebeae9f453 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 23 Apr 2018 11:43:34 -0500 Subject: amd-xgbe: Only use the SFP supported transceiver signals The SFP eeprom indicates the transceiver signals (Rx LOS, Tx Fault, etc.) that it supports. 
Update the driver to include checking the eeprom data when deciding whether to use a transceiver signal. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 71 ++++++++++++++++++++++------- 1 file changed, 54 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index b48efc04c4da..aac884314000 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -253,6 +253,10 @@ enum xgbe_sfp_speed { #define XGBE_SFP_BASE_VENDOR_SN 4 #define XGBE_SFP_BASE_VENDOR_SN_LEN 16 +#define XGBE_SFP_EXTD_OPT1 1 +#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1) +#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3) + #define XGBE_SFP_EXTD_DIAG 28 #define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) @@ -332,6 +336,7 @@ struct xgbe_phy_data { unsigned int sfp_gpio_address; unsigned int sfp_gpio_mask; + unsigned int sfp_gpio_inputs; unsigned int sfp_gpio_rx_los; unsigned int sfp_gpio_tx_fault; unsigned int sfp_gpio_mod_absent; @@ -986,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata) phy_data->sfp_phy_avail = 1; } +static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data) +{ + u8 *sfp_extd = phy_data->sfp_eeprom.extd; + + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS)) + return false; + + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los)) + return true; + + return false; +} + +static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data) +{ + u8 *sfp_extd = phy_data->sfp_eeprom.extd; + + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT)) + return false; + + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault)) + return true; + + return false; +} + +static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data) +{ + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent)) + return true; + + return false; +} + static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -1031,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) return; + /* Update transceiver signals (eeprom extd/options) */ + phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); + phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); + if (xgbe_phy_sfp_parse_quirks(pdata)) return; @@ -1196,7 +1248,6 @@ put: static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int gpio_input; u8 gpio_reg, gpio_ports[2]; int ret; @@ -1211,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) return; } - gpio_input = (gpio_ports[1] << 8) | gpio_ports[0]; - - if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) { - /* No GPIO, just assume the module is present for now */ - phy_data->sfp_mod_absent = 0; - } else { - if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent))) - phy_data->sfp_mod_absent = 0; - } - - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) && - (gpio_input & (1 << phy_data->sfp_gpio_rx_los))) - phy_data->sfp_rx_los = 1; + phy_data->sfp_gpio_inputs = 
(gpio_ports[1] << 8) | gpio_ports[0]; - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) && - (gpio_input & (1 << phy_data->sfp_gpio_tx_fault))) - phy_data->sfp_tx_fault = 1; + phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data); } static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata) -- cgit From 9cf2f437ca5b39828984064fad213e68fc17ef11 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 24 Apr 2018 14:33:37 +0800 Subject: team: fix netconsole setup over team MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The same fix in Commit dbe173079ab5 ("bridge: fix netconsole setup over bridge") is also needed for team driver. While at it, remove the unnecessary parameter *team from team_port_enable_netpoll(). v1->v2: - fix it in a better way, as does bridge. Fixes: 0fb52a27a04a ("team: cleanup netpoll clode") Reported-by: João Avelino Bellomo Filho Signed-off-by: Xin Long Signed-off-by: David S. Miller --- drivers/net/team/team.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index acbe84967834..ddb6bf85a59c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1072,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port) } #ifdef CONFIG_NET_POLL_CONTROLLER -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int __team_port_enable_netpoll(struct team_port *port) { struct netpoll *np; int err; - if (!team->dev->npinfo) - return 0; - np = kzalloc(sizeof(*np), GFP_KERNEL); if (!np) return -ENOMEM; @@ -1093,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port) return err; } +static int team_port_enable_netpoll(struct team_port *port) +{ + if (!port->team->dev->npinfo) + return 0; + + return __team_port_enable_netpoll(port); +} + static void team_port_disable_netpoll(struct team_port *port) { struct netpoll *np = port->np; @@ -1107,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port) kfree(np); } #else -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team_port *port) { return 0; } @@ -1221,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, goto err_vids_add; } - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(port); if (err) { netdev_err(dev, "Failed to enable netpoll on device %s\n", portname); @@ -1918,7 +1923,7 @@ static int team_netpoll_setup(struct net_device *dev, mutex_lock(&team->lock); list_for_each_entry(port, &team->port_list, list) { - err = team_port_enable_netpoll(team, port); + err = __team_port_enable_netpoll(port); if (err) { __team_netpoll_cleanup(team); break; -- cgit From 39035bfdc3f18987aba04165060bfbfa10ffc1cd Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 27 Mar 2018 15:21:48 +0100 Subject: ixgbevf: ensure xdp_ring resources are free'd on error exit The current error handling for failed resource setup for xdp_ring data is a break out of the loop and returning 0 indicated everything was OK, when in fact it is not. Fix this by exiting via the error exit label err_setup_tx that will clean up the resources correctly and return and error status. 
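The pattern behind this ixgbevf fix is the usual error-unwind idiom: when setting up the Nth resource fails, jump to a cleanup label that releases everything allocated so far and propagate the error, instead of breaking out of the loop and returning success. The following is a minimal standalone C sketch of that idiom, not the ixgbevf code itself; the demo_* names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical per-queue resource, used only for this illustration. */
struct demo_ring {
        int *descs;
};

static int demo_setup_ring(struct demo_ring *ring)
{
        ring->descs = calloc(256, sizeof(*ring->descs));
        return ring->descs ? 0 : -1;
}

static void demo_free_ring(struct demo_ring *ring)
{
        free(ring->descs);
        ring->descs = NULL;
}

/* Set up all rings; on any failure, unwind what was already allocated
 * and report the error instead of returning 0 as if setup succeeded.
 */
static int demo_setup_all(struct demo_ring *rings, int count)
{
        int i, err;

        for (i = 0; i < count; i++) {
                err = demo_setup_ring(&rings[i]);
                if (!err)
                        continue;
                fprintf(stderr, "allocation for ring %d failed\n", i);
                goto err_setup;        /* a bare "break" here would hide the failure */
        }
        return 0;

err_setup:
        while (i--)
                demo_free_ring(&rings[i]);
        return err;
}

int main(void)
{
        struct demo_ring rings[4] = { 0 };

        return demo_setup_all(rings, 4) ? 1 : 0;
}
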
Detected by CoverityScan, CID#1466879 ("Logically dead code") Fixes: 21092e9ce8b1 ("ixgbevf: Add support for XDP_TX action") Signed-off-by: Colin Ian King Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 3d9033f26eff..e3d04f226d57 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3420,7 +3420,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) if (!err) continue; hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); - break; + goto err_setup_tx; } return 0; -- cgit From 2707df9773cd2cb8b0f35b8592431b301da9d352 Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Fri, 30 Mar 2018 17:06:52 -0700 Subject: igb: Fix the transmission mode of queue 0 for Qav mode When Qav mode is enabled, queue 0 should be kept on Stream Reservation mode. From the i210 datasheet, section 8.12.19: "Note: Queue0 QueueMode must be set to 1b when TransmitMode is set to Qav." ("QueueMode 1b" represents the Stream Reservation mode) The solution is to give queue 0 the all the credits it might need, so it has priority over queue 1. A situation where this can happen is when cbs is "installed" only on queue 1, leaving queue 0 alone. For example: $ tc qdisc replace dev enp2s0 handle 100: parent root mqprio num_tc 3 \ map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 0 $ tc qdisc replace dev enp2s0 parent 100:2 cbs locredit -1470 \ hicredit 30 sendslope -980000 idleslope 20000 offload 1 Signed-off-by: Vinicius Costa Gomes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index c1c0bc30a16d..cce7ada89255 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1700,7 +1700,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); - if (enable) { + if (enable || queue == 0) { + /* i210 does not allow the queue 0 to be in the Strict + * Priority mode while the Qav mode is enabled, so, + * instead of disabling strict priority mode, we give + * queue 0 the maximum of credits possible. + * + * See section 8.12.19 of the i210 datasheet, "Note: + * Queue0 QueueMode must be set to 1b when + * TransmitMode is set to Qav." + */ + if (queue == 0 && !enable) { + /* max "linkspeed" idleslope in kbps */ + idleslope = 1000000; + hicredit = ETH_FRAME_LEN; + } + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); -- cgit From d332a38c9519e0208f04da465bc88427db3485f6 Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Tue, 10 Apr 2018 10:49:49 -0700 Subject: ice: Fix initialization for num_nodes_added ice_sched_add_nodes_to_layer is used recursively, and so we start with num_nodes_added being 0. This way, in case of an error or if num_nodes is NULL, the function just returns 0 to indicate that no nodes were added. 
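The point of the change is simply that an output parameter must be initialized before any early return, so callers, including recursive ones, never read a stale count on the error path. A small standalone C sketch of that pattern follows; the demo_* names are hypothetical and this is not the ice scheduler code.

#include <stdio.h>

/* Add up to @want nodes at this layer, recursing once if the layer is
 * full; @added always reflects how many nodes this call actually added,
 * even when the function bails out early.
 */
static int demo_add_nodes(int layer, int want, int *added)
{
        int room = 4 - layer;        /* pretend capacity shrinks per layer */

        *added = 0;                /* initialize before any early return */

        if (!want)
                return 0;
        if (layer > 3)
                return -1;        /* error: caller still sees *added == 0 */

        if (want <= room) {
                *added = want;
                return 0;
        }

        *added = room;
        {
                int more = 0;
                int err = demo_add_nodes(layer + 1, want - room, &more);

                *added += more;
                return err;
        }
}

int main(void)
{
        int added = 0;
        int err = demo_add_nodes(0, 9, &added);

        printf("err=%d added=%d\n", err, added);
        return 0;
}
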
Fixes: 5513b920a4f7 ("ice: Update Tx scheduler tree for VSI multi-Tx queue support") Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_sched.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index f16ff3e4a840..2e6c1d92cc88 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, u16 num_added = 0; u32 temp; + *num_nodes_added = 0; + if (!num_nodes) return status; if (!parent || layer < hw->sw_entry_point_layer) return ICE_ERR_PARAM; - *num_nodes_added = 0; - /* max children per node per layer */ max_child_nodes = le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); -- cgit From 34357a90d5ca8228df4f88b21197f970285b209b Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Wed, 11 Apr 2018 10:41:47 -0700 Subject: ice: Fix incorrect comment for action type Action type 5 defines large action generic values. Fix comment to reflect that better. Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 5b13ca1bd85f..7dc5f045e969 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act { #define ICE_LG_ACT_MIRROR_VSI_ID_S 3 #define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S) - /* Action type = 5 - Large Action */ + /* Action type = 5 - Generic Value */ #define ICE_LG_ACT_GENERIC 0x5 #define ICE_LG_ACT_GENERIC_VALUE_S 3 #define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S) -- cgit From 30d84397affb0fcb11beaf049caabfcb1dac65a6 Mon Sep 17 00:00:00 2001 From: Ben Shelton Date: Wed, 11 Apr 2018 12:21:33 -0700 Subject: ice: Do not check INTEVENT bit for OICR interrupts According to the hardware spec, checking the INTEVENT bit isn't a reliable way to detect if an OICR interrupt has occurred. This is because this bit can be cleared by the hardware/firmware before the interrupt service routine has run. So instead, just check for OICR events every time. 
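In other words, the interrupt service routine now treats the cause register itself as authoritative and services whatever cause bits are set, without first testing a summary bit that hardware or firmware may already have cleared. A rough standalone C sketch of that structure is below; the DEMO_* names are hypothetical and do not reflect the real ice register layout.

#include <stdio.h>
#include <stdint.h>

#define DEMO_CAUSE_RESET   (1u << 0)
#define DEMO_CAUSE_MAILBOX (1u << 1)
#define DEMO_CAUSE_ERROR   (1u << 2)

/* Pretend read of a cause register. */
static uint32_t demo_read_causes(void)
{
        return DEMO_CAUSE_MAILBOX | DEMO_CAUSE_ERROR;
}

/* Service every cause bit that is set; there is no early bail-out on a
 * "summary" bit, so causes are never silently dropped when that summary
 * indication has already been cleared.
 */
static void demo_service_causes(void)
{
        uint32_t causes = demo_read_causes();

        if (causes & DEMO_CAUSE_RESET)
                printf("handling reset request\n");
        if (causes & DEMO_CAUSE_MAILBOX)
                printf("handling mailbox event\n");
        if (causes & DEMO_CAUSE_ERROR)
                printf("handling error event\n");
}

int main(void)
{
        demo_service_causes();
        return 0;
}
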
Fixes: 940b61af02f4 ("ice: Initialize PF and setup miscellaneous interrupt") Signed-off-by: Ben Shelton Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 2 -- drivers/net/ethernet/intel/ice/ice_main.c | 4 ---- 2 files changed, 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 1b9e2ef48a9d..499904874b3f 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -121,8 +121,6 @@ #define PFINT_FW_CTL_CAUSE_ENA_S 30 #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) #define PFINT_OICR 0x0016CA00 -#define PFINT_OICR_INTEVENT_S 0 -#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S) #define PFINT_OICR_HLP_RDY_S 14 #define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) #define PFINT_OICR_CPM_RDY_S 15 diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 210b7910f1cd..5299caf55a7f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA); - if (!(oicr & PFINT_OICR_INTEVENT_M)) - goto ena_intr; - if (oicr & PFINT_OICR_GRST_M) { u32 reset; /* we have a reset warning */ @@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) } ret = IRQ_HANDLED; -ena_intr: /* re-enable interrupt causes that are not handled during this pass */ wr32(hw, PFINT_OICR_ENA, ena_mask); if (!test_bit(__ICE_DOWN, pf->state)) { -- cgit From f8d6203780b73c07dc49ee421fedae8edb76b6e4 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 24 Apr 2018 17:09:30 +0100 Subject: sfc: ARFS filter IDs Associate an arbitrary ID with each ARFS filter, allowing to properly query for expiry. The association is maintained in a hash table, which is protected by a spinlock. v3: fix build warnings when CONFIG_RFS_ACCEL is disabled (thanks lkp-robot). v2: fixed uninitialised variable (thanks davem and lkp-robot). Fixes: 3af0f34290f6 ("sfc: replace asynchronous filter operations") Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 80 +++++++++++-------- drivers/net/ethernet/sfc/efx.c | 143 ++++++++++++++++++++++++++++++++++ drivers/net/ethernet/sfc/efx.h | 21 +++++ drivers/net/ethernet/sfc/farch.c | 41 ++++++++-- drivers/net/ethernet/sfc/net_driver.h | 36 +++++++++ drivers/net/ethernet/sfc/rx.c | 62 +++++++++++++-- 6 files changed, 337 insertions(+), 46 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 83ce229f4eb7..63036d9bf3e6 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3999,29 +3999,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx) atomic_set(&efx->active_queues, 0); } -static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, - const struct efx_filter_spec *right) -{ - if ((left->match_flags ^ right->match_flags) | - ((left->flags ^ right->flags) & - (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) - return false; - - return memcmp(&left->outer_vid, &right->outer_vid, - sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) == 0; -} - -static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) -{ - BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); - return jhash2((const u32 *)&spec->outer_vid, - (sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) / 4, - 0); - /* XXX should we randomise the initval? */ -} - /* Decide whether a filter should be exclusive or else should allow * delivery to additional recipients. Currently we decide that * filters for specific local unicast MAC and IP addresses are @@ -4346,7 +4323,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, goto out_unlock; match_pri = rc; - hash = efx_ef10_filter_hash(spec); + hash = efx_filter_spec_hash(spec); is_mc_recip = efx_filter_is_mc_recipient(spec); if (is_mc_recip) bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); @@ -4378,7 +4355,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, if (!saved_spec) { if (ins_index < 0) ins_index = i; - } else if (efx_ef10_filter_equal(spec, saved_spec)) { + } else if (efx_filter_spec_equal(spec, saved_spec)) { if (spec->priority < saved_spec->priority && spec->priority != EFX_FILTER_PRI_AUTO) { rc = -EPERM; @@ -4762,27 +4739,62 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, unsigned int filter_idx) { + struct efx_filter_spec *spec, saved_spec; struct efx_ef10_filter_table *table; - struct efx_filter_spec *spec; - bool ret; + struct efx_arfs_rule *rule = NULL; + bool ret = true, force = false; + u16 arfs_id; down_read(&efx->filter_sem); table = efx->filter_state; down_write(&table->lock); spec = efx_ef10_filter_entry_spec(table, filter_idx); - if (!spec || spec->priority != EFX_FILTER_PRI_HINT) { - ret = true; + if (!spec || spec->priority != EFX_FILTER_PRI_HINT) goto out_unlock; - } - if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, 0)) { - ret = false; - goto out_unlock; + spin_lock_bh(&efx->rps_hash_lock); + if (!efx->rps_hash_table) { + /* In the absence of the table, we always return 0 to ARFS. 
*/ + arfs_id = 0; + } else { + rule = efx_rps_hash_find(efx, spec); + if (!rule) + /* ARFS table doesn't know of this filter, so remove it */ + goto expire; + arfs_id = rule->arfs_id; + ret = efx_rps_check_rule(rule, filter_idx, &force); + if (force) + goto expire; + if (!ret) { + spin_unlock_bh(&efx->rps_hash_lock); + goto out_unlock; + } } - + if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id)) + ret = false; + else if (rule) + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; +expire: + saved_spec = *spec; /* remove operation will kfree spec */ + spin_unlock_bh(&efx->rps_hash_lock); + /* At this point (since we dropped the lock), another thread might queue + * up a fresh insertion request (but the actual insertion will be held + * up by our possession of the filter table lock). In that case, it + * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that + * the rule is not removed by efx_rps_hash_del() below. + */ ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, filter_idx, true) == 0; + /* While we can't safely dereference rule (we dropped the lock), we can + * still test it for NULL. + */ + if (ret && rule) { + /* Expiring, so remove entry from ARFS table */ + spin_lock_bh(&efx->rps_hash_lock); + efx_rps_hash_del(efx, &saved_spec); + spin_unlock_bh(&efx->rps_hash_lock); + } out_unlock: up_write(&table->lock); up_read(&efx->filter_sem); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 692dd729ee2a..a4ebd8715494 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -3027,6 +3027,10 @@ static int efx_init_struct(struct efx_nic *efx, mutex_init(&efx->mac_lock); #ifdef CONFIG_RFS_ACCEL mutex_init(&efx->rps_mutex); + spin_lock_init(&efx->rps_hash_lock); + /* Failure to allocate is not fatal, but may degrade ARFS performance */ + efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE, + sizeof(*efx->rps_hash_table), GFP_KERNEL); #endif efx->phy_op = &efx_dummy_phy_operations; efx->mdio.dev = net_dev; @@ -3070,6 +3074,10 @@ static void efx_fini_struct(struct efx_nic *efx) { int i; +#ifdef CONFIG_RFS_ACCEL + kfree(efx->rps_hash_table); +#endif + for (i = 0; i < EFX_MAX_CHANNELS; i++) kfree(efx->channel[i]); @@ -3092,6 +3100,141 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); } +bool efx_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right) +{ + if ((left->match_flags ^ right->match_flags) | + ((left->flags ^ right->flags) & + (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) + return false; + + return memcmp(&left->outer_vid, &right->outer_vid, + sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) == 0; +} + +u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) +{ + BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); + return jhash2((const u32 *)&spec->outer_vid, + (sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) / 4, + 0); +} + +#ifdef CONFIG_RFS_ACCEL +bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, + bool *force) +{ + if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { + /* ARFS is currently updating this entry, leave it */ + return false; + } + if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) { + /* ARFS tried and failed to update this, so it's probably out + * of date. Remove the filter and the ARFS rule entry. 
+ */ + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; + *force = true; + return true; + } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ + /* ARFS has moved on, so old filter is not needed. Since we did + * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will + * not be removed by efx_rps_hash_del() subsequently. + */ + *force = true; + return true; + } + /* Remove it iff ARFS wants to. */ + return true; +} + +struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + u32 hash = efx_filter_spec_hash(spec); + + WARN_ON(!spin_is_locked(&efx->rps_hash_lock)); + if (!efx->rps_hash_table) + return NULL; + return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; +} + +struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) + return rule; + } + return NULL; +} + +struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) { + *new = false; + return rule; + } + } + rule = kmalloc(sizeof(*rule), GFP_ATOMIC); + *new = true; + if (rule) { + memcpy(&rule->spec, spec, sizeof(rule->spec)); + hlist_add_head(&rule->node, head); + } + return rule; +} + +void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (WARN_ON(!head)) + return; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) { + /* Someone already reused the entry. We know that if + * this check doesn't fire (i.e. filter_id == REMOVING) + * then the REMOVING mark was put there by our caller, + * because caller is holding a lock on filter table and + * only holders of that lock set REMOVING. + */ + if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) + return; + hlist_del(node); + kfree(rule); + return; + } + } + /* We didn't find it. */ + WARN_ON(1); +} +#endif + /* RSS contexts. 
We're using linked lists and crappy O(n) algorithms, because * (a) this is an infrequent control-plane operation and (b) n is small (max 64) */ diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index a3140e16fcef..3f759ebdcf10 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -186,6 +186,27 @@ static inline void efx_filter_rfs_expire(struct work_struct *data) {} #endif bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); +bool efx_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right); +u32 efx_filter_spec_hash(const struct efx_filter_spec *spec); + +#ifdef CONFIG_RFS_ACCEL +bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, + bool *force); + +struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec); + +/* @new is written to indicate if entry was newly added (true) or if an old + * entry was found and returned (false). + */ +struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new); + +void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec); +#endif + /* RSS contexts */ struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 7174ef5e5c5e..c72adf8b52ea 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2905,18 +2905,45 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, { struct efx_farch_filter_state *state = efx->filter_state; struct efx_farch_filter_table *table; - bool ret = false; + bool ret = false, force = false; + u16 arfs_id; down_write(&state->lock); + spin_lock_bh(&efx->rps_hash_lock); table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; if (test_bit(index, table->used_bitmap) && - table->spec[index].priority == EFX_FILTER_PRI_HINT && - rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, - flow_id, 0)) { - efx_farch_filter_table_clear_entry(efx, table, index); - ret = true; + table->spec[index].priority == EFX_FILTER_PRI_HINT) { + struct efx_arfs_rule *rule = NULL; + struct efx_filter_spec spec; + + efx_farch_filter_to_gen_spec(&spec, &table->spec[index]); + if (!efx->rps_hash_table) { + /* In the absence of the table, we always returned 0 to + * ARFS, so use the same to query it. 
+ */ + arfs_id = 0; + } else { + rule = efx_rps_hash_find(efx, &spec); + if (!rule) { + /* ARFS table doesn't know of this filter, remove it */ + force = true; + } else { + arfs_id = rule->arfs_id; + if (!efx_rps_check_rule(rule, index, &force)) + goto out_unlock; + } + } + if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id, + flow_id, arfs_id)) { + if (rule) + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; + efx_rps_hash_del(efx, &spec); + efx_farch_filter_table_clear_entry(efx, table, index); + ret = true; + } } - +out_unlock: + spin_unlock_bh(&efx->rps_hash_lock); up_write(&state->lock); return ret; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index eea3808b3f25..65568925c3ef 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -734,6 +734,35 @@ struct efx_rss_context { }; #ifdef CONFIG_RFS_ACCEL +/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING + * is used to test if filter does or will exist. + */ +#define EFX_ARFS_FILTER_ID_PENDING -1 +#define EFX_ARFS_FILTER_ID_ERROR -2 +#define EFX_ARFS_FILTER_ID_REMOVING -3 +/** + * struct efx_arfs_rule - record of an ARFS filter and its IDs + * @node: linkage into hash table + * @spec: details of the filter (used as key for hash table). Use efx->type to + * determine which member to use. + * @rxq_index: channel to which the filter will steer traffic. + * @arfs_id: filter ID which was returned to ARFS + * @filter_id: index in software filter table. May be + * %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet, + * %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or + * %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter. + */ +struct efx_arfs_rule { + struct hlist_node node; + struct efx_filter_spec spec; + u16 rxq_index; + u16 arfs_id; + s32 filter_id; +}; + +/* Size chosen so that the table is one page (4kB) */ +#define EFX_ARFS_HASH_TABLE_SIZE 512 + /** * struct efx_async_filter_insertion - Request to asynchronously insert a filter * @net_dev: Reference to the netdevice @@ -873,6 +902,10 @@ struct efx_async_filter_insertion { * @rps_expire_channel's @rps_flow_id * @rps_slot_map: bitmap of in-flight entries in @rps_slot * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() + * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and + * @rps_next_id). + * @rps_hash_table: Mapping between ARFS filters and their various IDs + * @rps_next_id: next arfs_id for an ARFS filter * @active_queues: Count of RX and TX queues that haven't been flushed and drained. * @rxq_flush_pending: Count of number of receive queues that need to be flushed. * Decremented when the efx_flush_rx_queue() is called. 
@@ -1029,6 +1062,9 @@ struct efx_nic { unsigned int rps_expire_index; unsigned long rps_slot_map; struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; + spinlock_t rps_hash_lock; + struct hlist_head *rps_hash_table; + u32 rps_next_id; #endif atomic_t active_queues; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 9c593c661cbf..64a94f242027 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -834,9 +834,29 @@ static void efx_filter_rfs_work(struct work_struct *data) struct efx_nic *efx = netdev_priv(req->net_dev); struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); int slot_idx = req - efx->rps_slot; + struct efx_arfs_rule *rule; + u16 arfs_id = 0; int rc; rc = efx->type->filter_insert(efx, &req->spec, true); + if (efx->rps_hash_table) { + spin_lock_bh(&efx->rps_hash_lock); + rule = efx_rps_hash_find(efx, &req->spec); + /* The rule might have already gone, if someone else's request + * for the same spec was already worked and then expired before + * we got around to our work. In that case we have nothing + * tying us to an arfs_id, meaning that as soon as the filter + * is considered for expiry it will be removed. + */ + if (rule) { + if (rc < 0) + rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; + else + rule->filter_id = rc; + arfs_id = rule->arfs_id; + } + spin_unlock_bh(&efx->rps_hash_lock); + } if (rc >= 0) { /* Remember this so we can check whether to expire the filter * later. @@ -848,18 +868,18 @@ static void efx_filter_rfs_work(struct work_struct *data) if (req->spec.ether_type == htons(ETH_P_IP)) netif_info(efx, rx_status, efx->net_dev, - "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n", (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", req->spec.rem_host, ntohs(req->spec.rem_port), req->spec.loc_host, ntohs(req->spec.loc_port), - req->rxq_index, req->flow_id, rc); + req->rxq_index, req->flow_id, rc, arfs_id); else netif_info(efx, rx_status, efx->net_dev, - "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", + "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n", (req->spec.ip_proto == IPPROTO_TCP) ? 
"TCP" : "UDP", req->spec.rem_host, ntohs(req->spec.rem_port), req->spec.loc_host, ntohs(req->spec.loc_port), - req->rxq_index, req->flow_id, rc); + req->rxq_index, req->flow_id, rc, arfs_id); } /* Release references */ @@ -872,8 +892,10 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, { struct efx_nic *efx = netdev_priv(net_dev); struct efx_async_filter_insertion *req; + struct efx_arfs_rule *rule; struct flow_keys fk; int slot_idx; + bool new; int rc; /* find a free slot */ @@ -926,12 +948,42 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, req->spec.rem_port = fk.ports.src; req->spec.loc_port = fk.ports.dst; + if (efx->rps_hash_table) { + /* Add it to ARFS hash table */ + spin_lock(&efx->rps_hash_lock); + rule = efx_rps_hash_add(efx, &req->spec, &new); + if (!rule) { + rc = -ENOMEM; + goto out_unlock; + } + if (new) + rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; + rc = rule->arfs_id; + /* Skip if existing or pending filter already does the right thing */ + if (!new && rule->rxq_index == rxq_index && + rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) + goto out_unlock; + rule->rxq_index = rxq_index; + rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; + spin_unlock(&efx->rps_hash_lock); + } else { + /* Without an ARFS hash table, we just use arfs_id 0 for all + * filters. This means if multiple flows hash to the same + * flow_id, all but the most recently touched will be eligible + * for expiry. + */ + rc = 0; + } + + /* Queue the request */ dev_hold(req->net_dev = net_dev); INIT_WORK(&req->work, efx_filter_rfs_work); req->rxq_index = rxq_index; req->flow_id = flow_id; schedule_work(&req->work); - return 0; + return rc; +out_unlock: + spin_unlock(&efx->rps_hash_lock); out_clear: clear_bit(slot_idx, &efx->rps_slot_map); return rc; -- cgit From d6fef10c750e64f248543d2eee7c86a4a019f7ec Mon Sep 17 00:00:00 2001 From: Md Fahad Iqbal Polash Date: Mon, 16 Apr 2018 10:07:03 -0700 Subject: ice: Fix insufficient memory issue in ice_aq_manage_mac_read For the MAC read operation, the device can return up to two (LAN and WoL) MAC addresses. Without access to adequate memory, the device will return an error. Fixed this by allocating the right amount of memory. Also, logic to detect and copy the LAN MAC address into the port_info structure has been added. Note that the WoL MAC address is ignored currently as the WoL feature isn't supported yet. 
Fixes: dc49c7723676 ("ice: Get MAC/PHY/link info and scheduler topology") Signed-off-by: Md Fahad Iqbal Polash Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 21977ec984c4..71d032cc5fa7 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_aq_desc desc; enum ice_status status; u16 flags; + u8 i; cmd = &desc.params.mac_read; @@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, return ICE_ERR_CFG; } - ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr); - ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr); + /* A single port can report up to two (LAN and WoL) addresses */ + for (i = 0; i < cmd->num_addr; i++) + if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) { + ether_addr_copy(hw->port_info->mac.lan_addr, + resp[i].mac_addr); + ether_addr_copy(hw->port_info->mac.perm_addr, + resp[i].mac_addr); + break; + } + return 0; } @@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_sched; - /* Get port MAC information */ - mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp); - mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL); + /* Get MAC information */ + /* A single port can report up to two (LAN and WoL) addresses */ + mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2, + sizeof(struct ice_aqc_manage_mac_read_resp), + GFP_KERNEL); + mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); if (!mac_buf) { status = ICE_ERR_NO_MEMORY; -- cgit
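The sizing rule in this last fix generalizes: allocate the reply buffer for the maximum number of records the device may return, then scan the records for the entry type you actually want. The standalone C sketch below shows that pattern with hypothetical demo_* names and a stubbed-out query; it is not the ice admin-queue code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_ADDR_TYPE_LAN 0
#define DEMO_ADDR_TYPE_WOL 1
#define DEMO_MAX_ADDRS     2        /* device may report LAN and WoL */

struct demo_mac_resp {
        int addr_type;
        unsigned char mac[6];
};

/* Stand-in for the firmware reply: fills up to @max records and
 * returns how many were written.
 */
static int demo_query_macs(struct demo_mac_resp *resp, int max)
{
        static const unsigned char lan[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

        if (max < 1)
                return 0;
        resp[0].addr_type = DEMO_ADDR_TYPE_LAN;
        memcpy(resp[0].mac, lan, sizeof(lan));
        return 1;
}

int main(void)
{
        struct demo_mac_resp *resp;
        unsigned char lan_mac[6] = { 0 };
        int i, n;

        /* Allocate room for every record the device may return, not
         * just one, so a two-address reply cannot overrun the buffer.
         */
        resp = calloc(DEMO_MAX_ADDRS, sizeof(*resp));
        if (!resp)
                return 1;

        n = demo_query_macs(resp, DEMO_MAX_ADDRS);

        /* Pick out the LAN address; a WoL entry, if present, is ignored. */
        for (i = 0; i < n; i++) {
                if (resp[i].addr_type == DEMO_ADDR_TYPE_LAN) {
                        memcpy(lan_mac, resp[i].mac, sizeof(lan_mac));
                        break;
                }
        }

        printf("LAN MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
               lan_mac[0], lan_mac[1], lan_mac[2],
               lan_mac[3], lan_mac[4], lan_mac[5]);
        free(resp);
        return 0;
}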