Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 76
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/include/vi_structs.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | 23
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 1
-rw-r--r--  drivers/i2c/busses/Kconfig | 20
-rw-r--r--  drivers/i2c/busses/Makefile | 2
-rw-r--r--  drivers/i2c/busses/i2c-altera.c | 511
-rw-r--r--  drivers/i2c/busses/i2c-stm32.h | 20
-rw-r--r--  drivers/i2c/busses/i2c-stm32f4.c | 18
-rw-r--r--  drivers/i2c/busses/i2c-stm32f7.c | 972
34 files changed, 1768 insertions, 171 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 12e71bbfd222..103635ab784c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -76,7 +76,7 @@
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
-extern unsigned amdgpu_gart_size;
+extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index fb6e5dbd5a03..309f2419c6d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -155,7 +155,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
return (struct kfd2kgd_calls *)&kfd2kgd;
- return (struct kfd2kgd_calls *)&kfd2kgd;
}
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 269b835571eb..60d8bedb694d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1079,6 +1079,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
GFP_KERNEL);
p->num_post_dep_syncobjs = 0;
+ if (!p->post_dep_syncobjs)
+ return -ENOMEM;
+
for (i = 0; i < num_deps; ++i) {
p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
if (!p->post_dep_syncobjs[i])
@@ -1150,7 +1153,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
amdgpu_job_free_resources(job);
- amdgpu_cs_parser_fini(p, 0, true);
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
@@ -1208,10 +1210,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
r = amdgpu_cs_submit(&parser, cs);
- if (r)
- goto out;
- return 0;
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1a459ac63df4..e630d918fefc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1062,11 +1062,11 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
}
- if (amdgpu_gart_size < 32) {
+ if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
/* gart size must be greater or equal to 32M */
dev_warn(adev->dev, "gart size (%d) too small\n",
amdgpu_gart_size);
- amdgpu_gart_size = 32;
+ amdgpu_gart_size = -1;
}
if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
@@ -2622,12 +2622,6 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
goto err;
}
- r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
- if (r) {
- DRM_ERROR("%p bind failed\n", bo->shadow);
- goto err;
- }
-
r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
NULL, fence, true);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index e39ec981b11c..0f16986ec5bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -76,7 +76,7 @@
int amdgpu_vram_limit = 0;
int amdgpu_vis_vram_limit = 0;
-unsigned amdgpu_gart_size = 256;
+int amdgpu_gart_size = -1; /* auto */
int amdgpu_gtt_size = -1; /* auto */
int amdgpu_moverate = -1; /* auto */
int amdgpu_benchmarking = 0;
@@ -128,7 +128,7 @@ module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
-MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc.)");
+MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)");
module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
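
With gartsize now following the same -1 = auto convention as gttsize, the size check in amdgpu_check_arguments() falls back to auto instead of clamping to 32 MB, and the per-ASIC defaults are chosen in the gmc_v6_0/v7_0/v8_0/v9_0 hunks further down. A stand-alone sketch of that policy (hypothetical helper name and a trimmed ASIC list, not the kernel code itself):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical user-space model of the -1 = auto GART size policy. */
enum asic { ASIC_POLARIS10, ASIC_TONGA, ASIC_RAVEN };

static uint64_t gart_size_bytes(int gartsize_mb, enum asic asic)
{
	if (gartsize_mb != -1 && gartsize_mb < 32) {
		fprintf(stderr, "gart size (%d) too small, using auto\n",
			gartsize_mb);
		gartsize_mb = -1;	/* same fallback as amdgpu_check_arguments() */
	}

	if (gartsize_mb == -1) {
		switch (asic) {
		case ASIC_TONGA:	/* UVD, VCE do not support GPUVM */
		case ASIC_RAVEN:	/* DCE SG support */
			return 1024ULL << 20;
		default:		/* all engines support GPUVM */
			return 256ULL << 20;
		}
	}
	return (uint64_t)gartsize_mb << 20;
}

int main(void)
{
	printf("%llu MiB\n", (unsigned long long)(gart_size_bytes(-1, ASIC_TONGA) >> 20));    /* 1024 */
	printf("%llu MiB\n", (unsigned long long)(gart_size_bytes(16, ASIC_POLARIS10) >> 20)); /* 256: too small, auto */
	printf("%llu MiB\n", (unsigned long long)(gart_size_bytes(512, ASIC_RAVEN) >> 20));    /* 512 */
	return 0;
}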
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 94c1e2e8e34c..f4370081f6e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -57,18 +57,6 @@
*/
/**
- * amdgpu_gart_set_defaults - set the default gart_size
- *
- * @adev: amdgpu_device pointer
- *
- * Set the default gart_size based on parameters and available VRAM.
- */
-void amdgpu_gart_set_defaults(struct amdgpu_device *adev)
-{
- adev->mc.gart_size = (uint64_t)amdgpu_gart_size << 20;
-}
-
-/**
* amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
*
* @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index d4cce6936200..afbe803b1a13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -56,7 +56,6 @@ struct amdgpu_gart {
const struct amdgpu_gart_funcs *gart_funcs;
};
-void amdgpu_gart_set_defaults(struct amdgpu_device *adev);
int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 9e05e257729f..0d15eb7d31d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -108,10 +108,10 @@ bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem)
*
* Allocate the address space for a node.
*/
-int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *tbo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+static int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_mem_reg *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr = man->priv;
@@ -143,12 +143,8 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
fpfn, lpfn, mode);
spin_unlock(&mgr->lock);
- if (!r) {
+ if (!r)
mem->start = node->start;
- if (&tbo->mem == mem)
- tbo->offset = (tbo->mem.start << PAGE_SHIFT) +
- tbo->bdev->man[tbo->mem.mem_type].gpu_offset;
- }
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 4bdd851f56d0..538e5f27d120 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -221,8 +221,9 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
spin_lock_init(&adev->irq.lock);
- /* Disable vblank irqs aggressively for power-saving */
- adev->ddev->vblank_disable_immediate = true;
+ if (!adev->enable_virtual_display)
+ /* Disable vblank irqs aggressively for power-saving */
+ adev->ddev->vblank_disable_immediate = true;
r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
if (r) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e7e899190bef..9e495da0bb03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -91,7 +91,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
- places[c].lpfn = 0;
+ if (flags & AMDGPU_GEM_CREATE_SHADOW)
+ places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+ else
+ places[c].lpfn = 0;
places[c].flags = TTM_PL_FLAG_TT;
if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
places[c].flags |= TTM_PL_FLAG_WC |
@@ -446,17 +449,16 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
if (bo->shadow)
return 0;
- bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
- memset(&placements, 0,
- (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
-
- amdgpu_ttm_placement_init(adev, &placement,
- placements, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+ memset(&placements, 0, sizeof(placements));
+ amdgpu_ttm_placement_init(adev, &placement, placements,
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_SHADOW);
r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_SHADOW,
NULL, &placement,
bo->tbo.resv,
0,
@@ -484,30 +486,28 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
{
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r;
- memset(&placements, 0,
- (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
+ memset(&placements, 0, sizeof(placements));
+ amdgpu_ttm_placement_init(adev, &placement, placements,
+ domain, parent_flags);
- amdgpu_ttm_placement_init(adev, &placement,
- placements, domain, flags);
-
- r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
- domain, flags, sg, &placement,
- resv, init_value, bo_ptr);
+ r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
+ parent_flags, sg, &placement, resv,
+ init_value, bo_ptr);
if (r)
return r;
- if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
- if (!resv) {
- r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL);
- WARN_ON(r != 0);
- }
+ if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
+ if (!resv)
+ WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+ NULL));
r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
if (!resv)
- ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock);
+ reservation_object_unlock((*bo_ptr)->tbo.resv);
if (r)
amdgpu_bo_unref(bo_ptr);
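
Note how the shadow handling is rearranged here: AMDGPU_GEM_CREATE_SHADOW is stripped from the parent BO's flags and applied only to the shadow, whose GTT placement is in turn capped to the GART aperture. A minimal sketch of the flag split (the flag values below are illustrative stand-ins, not copied from amdgpu_drm.h):

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the real amdgpu_drm.h flag values. */
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC	(1u << 2)
#define AMDGPU_GEM_CREATE_SHADOW	(1u << 4)

int main(void)
{
	uint64_t flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC | AMDGPU_GEM_CREATE_SHADOW;

	/* Parent BO: every flag except SHADOW, as in amdgpu_bo_create() above. */
	uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;

	/* Shadow BO: GTT + USWC + SHADOW, so its placement gets the GART cap. */
	uint64_t shadow_flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
				AMDGPU_GEM_CREATE_SHADOW;

	printf("parent=0x%llx shadow=0x%llx\n",
	       (unsigned long long)parent_flags,
	       (unsigned long long)shadow_flags);
	return 0;
}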
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 6c5646b48d1a..5ce65280b396 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -170,6 +170,16 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned irq_type)
{
int r;
+ int sched_hw_submission = amdgpu_sched_hw_submission;
+
+ /* Set the hw submission limit higher for KIQ because
+ * it's used for a number of gfx/compute tasks by both
+ * KFD and KGD which may have outstanding fences and
+ * it doesn't really use the gpu scheduler anyway;
+ * KIQ tasks get submitted directly to the ring.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ sched_hw_submission = max(sched_hw_submission, 256);
if (ring->adev == NULL) {
if (adev->num_rings >= AMDGPU_MAX_RINGS)
@@ -178,8 +188,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->adev = adev;
ring->idx = adev->num_rings++;
adev->rings[ring->idx] = ring;
- r = amdgpu_fence_driver_init_ring(ring,
- amdgpu_sched_hw_submission);
+ r = amdgpu_fence_driver_init_ring(ring, sched_hw_submission);
if (r)
return r;
}
@@ -218,8 +227,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
return r;
}
- ring->ring_size = roundup_pow_of_two(max_dw * 4 *
- amdgpu_sched_hw_submission);
+ ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
ring->buf_mask = (ring->ring_size / 4) - 1;
ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
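
The KIQ special case is easiest to see as arithmetic: assuming the usual hw submission default of 2 and an illustrative max_dw of 1024, a regular ring is sized to roundup_pow_of_two(1024 * 4 * 2) = 8 KB, while a KIQ ring sized for 256 outstanding submissions grows to 1 MB. A stand-alone sketch of that computation:

#include <stdio.h>
#include <stdint.h>

/* Minimal stand-in for the kernel's roundup_pow_of_two(). */
static uint64_t roundup_pow_of_two(uint64_t n)
{
	uint64_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static uint64_t ring_size(unsigned int max_dw, int sched_hw_submission, int is_kiq)
{
	if (is_kiq && sched_hw_submission < 256)
		sched_hw_submission = 256;	/* KIQ bump from the hunk above */
	return roundup_pow_of_two((uint64_t)max_dw * 4 * sched_hw_submission);
}

int main(void)
{
	/* max_dw = 1024 and hw_submission = 2 are illustrative values. */
	printf("gfx ring: %llu bytes\n", (unsigned long long)ring_size(1024, 2, 0)); /* 8192 */
	printf("KIQ ring: %llu bytes\n", (unsigned long long)ring_size(1024, 2, 1)); /* 1048576 */
	return 0;
}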
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8b2c294f6f79..7ef6c28a34d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -761,35 +761,11 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
sg_free_table(ttm->sg);
}
-static int amdgpu_ttm_do_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
-{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
- uint64_t flags;
- int r;
-
- spin_lock(&gtt->adev->gtt_list_lock);
- flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, mem);
- gtt->offset = (u64)mem->start << PAGE_SHIFT;
- r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
- ttm->pages, gtt->ttm.dma_address, flags);
-
- if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
- ttm->num_pages, gtt->offset);
- goto error_gart_bind;
- }
-
- list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
- spin_unlock(&gtt->adev->gtt_list_lock);
- return r;
-
-}
-
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct amdgpu_ttm_tt *gtt = (void*)ttm;
+ uint64_t flags;
int r = 0;
if (gtt->userptr) {
@@ -809,9 +785,24 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
bo_mem->mem_type == AMDGPU_PL_OA)
return -EINVAL;
- if (amdgpu_gtt_mgr_is_allocated(bo_mem))
- r = amdgpu_ttm_do_bind(ttm, bo_mem);
+ if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
+ return 0;
+ spin_lock(&gtt->adev->gtt_list_lock);
+ flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
+ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
+ r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
+ ttm->pages, gtt->ttm.dma_address, flags);
+
+ if (r) {
+ DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ ttm->num_pages, gtt->offset);
+ goto error_gart_bind;
+ }
+
+ list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+error_gart_bind:
+ spin_unlock(&gtt->adev->gtt_list_lock);
return r;
}
@@ -824,20 +815,39 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_tt *ttm = bo->ttm;
+ struct ttm_mem_reg tmp;
+
+ struct ttm_placement placement;
+ struct ttm_place placements;
int r;
if (!ttm || amdgpu_ttm_is_bound(ttm))
return 0;
- r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo,
- NULL, bo_mem);
- if (r) {
- DRM_ERROR("Failed to allocate GTT address space (%d)\n", r);
+ tmp = bo->mem;
+ tmp.mm_node = NULL;
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements.fpfn = 0;
+ placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+ placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+
+ r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
+ if (unlikely(r))
return r;
- }
- return amdgpu_ttm_do_bind(ttm, bo_mem);
+ r = ttm_bo_move_ttm(bo, true, false, &tmp);
+ if (unlikely(r))
+ ttm_bo_mem_put(bo, &tmp);
+ else
+ bo->offset = (bo->mem.start << PAGE_SHIFT) +
+ bo->bdev->man[bo->mem.mem_type].gpu_offset;
+
+ return r;
}
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
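
amdgpu_ttm_bind() now allocates real GTT space on demand: it builds a single placement whose lpfn caps the node to the GART aperture, moves the BO there, and only then derives the GPU offset. A tiny sketch of the two computations involved, assuming 4 KB pages and made-up sample values:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* 4 KB pages */

int main(void)
{
	uint64_t gart_size = 256ULL << 20;		/* illustrative 256 MiB GART */
	uint64_t gpu_offset = 0x0000800000000000ULL;	/* hypothetical GTT base */
	uint64_t mem_start = 0x1000;			/* page number picked by ttm_bo_mem_space() */

	/* lpfn cap used both for shadow BO placements and in amdgpu_ttm_bind() */
	printf("lpfn = %llu pages\n",
	       (unsigned long long)(gart_size >> PAGE_SHIFT));	/* 65536 */

	/* bo->offset = (mem.start << PAGE_SHIFT) + man[TTM_PL_TT].gpu_offset */
	printf("bo->offset = 0x%llx\n",
	       (unsigned long long)((mem_start << PAGE_SHIFT) + gpu_offset));
	return 0;
}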
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index f22a4758719d..43093bffa2cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -62,10 +62,6 @@ extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
-int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *tbo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem);
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b9a5a77eedaf..bd20ff018512 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -165,14 +165,6 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
unsigned i;
int r;
- if (parent->bo->shadow) {
- struct amdgpu_bo *shadow = parent->bo->shadow;
-
- r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
- if (r)
- return r;
- }
-
if (use_cpu_for_update) {
r = amdgpu_bo_kmap(parent->bo, NULL);
if (r)
@@ -1277,7 +1269,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
/* In the case of a mixed PT the PDE must point to it*/
if (p->adev->asic_type < CHIP_VEGA10 ||
nptes != AMDGPU_VM_PTE_COUNT(p->adev) ||
- p->func == amdgpu_vm_do_copy_ptes ||
+ p->src ||
!(flags & AMDGPU_PTE_VALID)) {
dst = amdgpu_bo_gpu_offset(entry->bo);
@@ -1294,9 +1286,23 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
entry->addr = (dst | flags);
if (use_cpu_update) {
+ /* In case a huge page is replaced with a system
+ * memory mapping, p->pages_addr != NULL and
+ * amdgpu_vm_cpu_set_ptes would try to translate dst
+ * through amdgpu_vm_map_gart. But dst is already a
+ * GPU address (of the page table). Disable
+ * amdgpu_vm_map_gart temporarily.
+ */
+ dma_addr_t *tmp;
+
+ tmp = p->pages_addr;
+ p->pages_addr = NULL;
+
pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
pde = pd_addr + (entry - parent->entries) * 8;
amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
+
+ p->pages_addr = tmp;
} else {
if (parent->bo->shadow) {
pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
@@ -1610,7 +1616,6 @@ error_free:
*
* @adev: amdgpu_device pointer
* @exclusive: fence we need to sync to
- * @gtt_flags: flags as they are used for GTT
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
@@ -1624,7 +1629,6 @@ error_free:
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
- uint64_t gtt_flags,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
@@ -1679,11 +1683,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
if (pages_addr) {
- if (flags == gtt_flags)
- src = adev->gart.table_addr +
- (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
- else
- max_entries = min(max_entries, 16ull * 1024ull);
+ max_entries = min(max_entries, 16ull * 1024ull);
addr = 0;
} else if (flags & AMDGPU_PTE_VALID) {
addr += adev->vm_manager.vram_base_offset;
@@ -1728,10 +1728,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_bo_va_mapping *mapping;
dma_addr_t *pages_addr = NULL;
- uint64_t gtt_flags, flags;
struct ttm_mem_reg *mem;
struct drm_mm_node *nodes;
struct dma_fence *exclusive;
+ uint64_t flags;
int r;
if (clear || !bo_va->base.bo) {
@@ -1751,15 +1751,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
exclusive = reservation_object_get_excl(bo->tbo.resv);
}
- if (bo) {
+ if (bo)
flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
- gtt_flags = (amdgpu_ttm_is_bound(bo->tbo.ttm) &&
- adev == amdgpu_ttm_adev(bo->tbo.bdev)) ?
- flags : 0;
- } else {
+ else
flags = 0x0;
- gtt_flags = ~0x0;
- }
spin_lock(&vm->status_lock);
if (!list_empty(&bo_va->base.vm_status))
@@ -1767,8 +1762,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
spin_unlock(&vm->status_lock);
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_split_mapping(adev, exclusive,
- gtt_flags, pages_addr, vm,
+ r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
mapping, flags, nodes,
&bo_va->last_pt_update);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 832e592fcd07..fc260c13b1da 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4579,9 +4579,9 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_misc_reserved = 0x00000003;
if (!(adev->flags & AMD_IS_APU)) {
mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
- + offsetof(struct vi_mqd_allocation, dyamic_cu_mask));
+ + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
- + offsetof(struct vi_mqd_allocation, dyamic_cu_mask));
+ + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
}
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
@@ -4768,8 +4768,8 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
} else {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
- ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF;
- ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF;
+ ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_mqd_init(ring);
@@ -4792,8 +4792,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
- ((struct vi_mqd_allocation *)mqd)->dyamic_cu_mask = 0xFFFFFFFF;
- ((struct vi_mqd_allocation *)mqd)->dyamic_rb_mask = 0xFFFFFFFF;
+ ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v8_0_mqd_init(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 4f2788b61a08..6c8040e616c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -124,7 +124,7 @@ static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
- uint32_t tmp, field;
+ uint32_t tmp;
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmVM_L2_CNTL);
@@ -143,9 +143,8 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
- field = adev->vm_manager.fragment_size;
tmp = mmVM_L2_CNTL3_DEFAULT;
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL3, tmp);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 12b0c4cd7a5a..5be9c83dfcf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -332,7 +332,24 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
adev->mc.visible_vram_size = adev->mc.aper_size;
- amdgpu_gart_set_defaults(adev);
+ /* set the gart size */
+ if (amdgpu_gart_size == -1) {
+ switch (adev->asic_type) {
+ case CHIP_HAINAN: /* no MM engines */
+ default:
+ adev->mc.gart_size = 256ULL << 20;
+ break;
+ case CHIP_VERDE: /* UVD, VCE do not support GPUVM */
+ case CHIP_TAHITI: /* UVD, VCE do not support GPUVM */
+ case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
+ case CHIP_OLAND: /* UVD, VCE do not support GPUVM */
+ adev->mc.gart_size = 1024ULL << 20;
+ break;
+ }
+ } else {
+ adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+ }
+
gmc_v6_0_vram_gtt_location(adev, &adev->mc);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index e42c1ad3af5e..eace9e7182c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -386,7 +386,27 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
adev->mc.visible_vram_size = adev->mc.real_vram_size;
- amdgpu_gart_set_defaults(adev);
+ /* set the gart size */
+ if (amdgpu_gart_size == -1) {
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ: /* no MM engines */
+ default:
+ adev->mc.gart_size = 256ULL << 20;
+ break;
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
+ case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */
+ case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */
+ case CHIP_KABINI: /* UVD, VCE do not support GPUVM */
+ case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
+ adev->mc.gart_size = 1024ULL << 20;
+ break;
+#endif
+ }
+ } else {
+ adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+ }
+
gmc_v7_0_vram_gtt_location(adev, &adev->mc);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 7ca2dae8237a..3b3326daf32b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -562,7 +562,26 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
adev->mc.visible_vram_size = adev->mc.real_vram_size;
- amdgpu_gart_set_defaults(adev);
+ /* set the gart size */
+ if (amdgpu_gart_size == -1) {
+ switch (adev->asic_type) {
+ case CHIP_POLARIS11: /* all engines support GPUVM */
+ case CHIP_POLARIS10: /* all engines support GPUVM */
+ case CHIP_POLARIS12: /* all engines support GPUVM */
+ default:
+ adev->mc.gart_size = 256ULL << 20;
+ break;
+ case CHIP_TONGA: /* UVD, VCE do not support GPUVM */
+ case CHIP_FIJI: /* UVD, VCE do not support GPUVM */
+ case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
+ case CHIP_STONEY: /* UVD does not support GPUVM, DCE SG support */
+ adev->mc.gart_size = 1024ULL << 20;
+ break;
+ }
+ } else {
+ adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+ }
+
gmc_v8_0_vram_gtt_location(adev, &adev->mc);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 2769c2b3b56e..d04d0b123212 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -499,7 +499,21 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
adev->mc.visible_vram_size = adev->mc.real_vram_size;
- amdgpu_gart_set_defaults(adev);
+ /* set the gart size */
+ if (amdgpu_gart_size == -1) {
+ switch (adev->asic_type) {
+ case CHIP_VEGA10: /* all engines support GPUVM */
+ default:
+ adev->mc.gart_size = 256ULL << 20;
+ break;
+ case CHIP_RAVEN: /* DCE SG support */
+ adev->mc.gart_size = 1024ULL << 20;
+ break;
+ }
+ } else {
+ adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+ }
+
gmc_v9_0_vram_gtt_location(adev, &adev->mc);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 4395a4f12149..74cb647da30e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -138,7 +138,7 @@ static void mmhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
{
- uint32_t tmp, field;
+ uint32_t tmp;
/* Setup L2 cache */
tmp = RREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL);
@@ -157,9 +157,8 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
- field = adev->vm_manager.fragment_size;
tmp = mmVM_L2_CNTL3_DEFAULT;
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
index ca93b5160ba6..3e606a761d0e 100644
--- a/drivers/gpu/drm/amd/include/vi_structs.h
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -419,8 +419,8 @@ struct vi_mqd_allocation {
struct vi_mqd mqd;
uint32_t wptr_poll_mem;
uint32_t rptr_report_mem;
- uint32_t dyamic_cu_mask;
- uint32_t dyamic_rb_mask;
+ uint32_t dynamic_cu_mask;
+ uint32_t dynamic_rb_mask;
};
struct cz_mqd {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 9d71a259d97d..f8f02e70b8bc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -1558,7 +1558,8 @@ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
*/
static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
- uint32_t gfx_clock, PllSetting_t *current_gfxclk_level)
+ uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
+ uint32_t *acg_freq)
{
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
@@ -1609,6 +1610,8 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
cpu_to_le16(dividers.usPll_ss_slew_frac);
current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
+ *acg_freq = gfx_clock / 100; /* 100 Khz to Mhz conversion */
+
return 0;
}
@@ -1689,7 +1692,8 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_gfx_level(hwmgr,
dpm_table->dpm_levels[i].value,
- &(pp_table->GfxclkLevel[i]));
+ &(pp_table->GfxclkLevel[i]),
+ &(pp_table->AcgFreqTable[i]));
if (result)
return result;
}
@@ -1698,7 +1702,8 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
while (i < NUM_GFXCLK_DPM_LEVELS) {
result = vega10_populate_single_gfx_level(hwmgr,
dpm_table->dpm_levels[j].value,
- &(pp_table->GfxclkLevel[i]));
+ &(pp_table->GfxclkLevel[i]),
+ &(pp_table->AcgFreqTable[i]));
if (result)
return result;
i++;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
index f6d6c61f796a..2818c98ff5ca 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
@@ -315,10 +315,12 @@ typedef struct {
uint8_t AcgEnable[NUM_GFXCLK_DPM_LEVELS];
GbVdroopTable_t AcgBtcGbVdroopTable;
QuadraticInt_t AcgAvfsGb;
- uint32_t Reserved[4];
+
+ /* ACG Frequency Table, in Mhz */
+ uint32_t AcgFreqTable[NUM_GFXCLK_DPM_LEVELS];
/* Padding - ignore */
- uint32_t MmHubPadding[7]; /* SMU internal use */
+ uint32_t MmHubPadding[3]; /* SMU internal use */
} PPTable_t;
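
This layout change keeps PPTable_t the same size: the four reserved dwords plus four of the seven padding dwords are reclaimed for the new frequency table. Assuming NUM_GFXCLK_DPM_LEVELS is 8 in this interface (the define is not part of this hunk), the bookkeeping is 4 + 7 = 8 + 3 dwords, which a compile-time check makes explicit:

#include <assert.h>
#include <stdint.h>

#define NUM_GFXCLK_DPM_LEVELS 8	/* assumed value, not visible in this hunk */

struct tail_before {
	uint32_t Reserved[4];
	uint32_t MmHubPadding[7];
};

struct tail_after {
	uint32_t AcgFreqTable[NUM_GFXCLK_DPM_LEVELS];
	uint32_t MmHubPadding[3];
};

/* Both tails occupy 11 dwords, so the SMU-visible PPTable_t size is unchanged. */
static_assert(sizeof(struct tail_before) == sizeof(struct tail_after),
	      "PPTable_t tail size must not change");

int main(void) { return 0; }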
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 76347ff6d655..c49a6f22002f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -380,7 +380,8 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
entry->num_register_entries = 0;
}
- if (fw_type == UCODE_ID_RLC_G)
+ if ((fw_type == UCODE_ID_RLC_G)
+ || (fw_type == UCODE_ID_CP_MEC))
entry->flags = 1;
else
entry->flags = 0;
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 38cea6fb25a8..97c94f9683fa 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -205,17 +205,32 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity)
{
struct amd_sched_rq *rq = entity->rq;
+ int r;
if (!amd_sched_entity_is_initialized(sched, entity))
return;
-
/**
* The client will not queue more IBs during this fini, consume existing
- * queued IBs
+ * queued IBs or discard them on SIGKILL
*/
- wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
-
+ if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
+ r = -ERESTARTSYS;
+ else
+ r = wait_event_killable(sched->job_scheduled,
+ amd_sched_entity_is_idle(entity));
amd_sched_rq_remove_entity(rq, entity);
+ if (r) {
+ struct amd_sched_job *job;
+
+ /* Park the kernel for a moment to make sure it isn't processing
+ * our enity.
+ */
+ kthread_park(sched->thread);
+ kthread_unpark(sched->thread);
+ while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
+ sched->ops->free_job(job);
+
+ }
kfifo_free(&entity->job_queue);
}
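
The teardown change boils down to: wait for the entity to drain unless the process was killed, and in the killed case free whatever is still queued instead of blocking forever. A user-space sketch of that drain-on-interrupted-wait pattern (a plain array stands in for the kfifo, and free_job() is hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct job { int id; };

/* Toy queue standing in for the entity's kfifo of job pointers. */
static struct job *queue[8];
static int head, tail;

static void free_job(struct job *job)
{
	printf("freeing unscheduled job %d\n", job->id);
	free(job);
}

static void entity_fini(int wait_was_interrupted)
{
	struct job *job;

	if (!wait_was_interrupted)
		return;	/* all queued jobs were consumed by the scheduler */

	/* Mirror of the kfifo_out() loop above: discard leftovers on SIGKILL. */
	while (head != tail) {
		job = queue[head++ % 8];
		free_job(job);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct job *j = malloc(sizeof(*j));

		j->id = i;
		queue[tail++ % 8] = j;
	}
	entity_fini(1);	/* pretend wait_event_killable() returned -ERESTARTSYS */
	return 0;
}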
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cba11f13d994..180ce6296416 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -109,8 +109,8 @@ static ssize_t ttm_bo_global_show(struct kobject *kobj,
struct ttm_bo_global *glob =
container_of(kobj, struct ttm_bo_global, kobj);
- return snprintf(buffer, PAGE_SIZE, "%lu\n",
- (unsigned long) atomic_read(&glob->bo_count));
+ return snprintf(buffer, PAGE_SIZE, "%d\n",
+ atomic_read(&glob->bo_count));
}
static struct attribute *ttm_bo_global_attrs[] = {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d0459b392e5e..c934ad5b3903 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -469,6 +469,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
+ atomic_inc(&bo->glob->bo_count);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index edc0cfb6fc1a..c06dce2c1da7 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -336,6 +336,16 @@ config I2C_POWERMAC
comment "I2C system bus drivers (mostly embedded / system-on-chip)"
+config I2C_ALTERA
+ tristate "Altera Soft IP I2C"
+ depends on (ARCH_SOCFPGA || NIOS2) && OF
+ help
+ If you say yes to this option, support will be included for the
+ Altera Soft IP I2C interfaces on SoCFPGA and Nios2 architectures.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-altera.
+
config I2C_ASPEED
tristate "Aspeed I2C Controller"
depends on ARCH_ASPEED || COMPILE_TEST
@@ -935,6 +945,16 @@ config I2C_STM32F4
This driver can also be built as module. If so, the module
will be called i2c-stm32f4.
+config I2C_STM32F7
+ tristate "STMicroelectronics STM32F7 I2C support"
+ depends on ARCH_STM32 || COMPILE_TEST
+ help
+ Enable this option to add support for STM32 I2C controller embedded
+ in STM32F7 SoCs.
+
+ This driver can also be built as module. If so, the module
+ will be called i2c-stm32f7.
+
config I2C_STU300
tristate "ST Microelectronics DDC I2C interface"
depends on MACH_U300
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 562daf738048..47f3ac9a695a 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o
obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o
# Embedded system I2C/SMBus host controller drivers
+obj-$(CONFIG_I2C_ALTERA) += i2c-altera.o
obj-$(CONFIG_I2C_ASPEED) += i2c-aspeed.o
obj-$(CONFIG_I2C_AT91) += i2c-at91.o
obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o
@@ -93,6 +94,7 @@ obj-$(CONFIG_I2C_SIRF) += i2c-sirf.o
obj-$(CONFIG_I2C_SPRD) += i2c-sprd.o
obj-$(CONFIG_I2C_ST) += i2c-st.o
obj-$(CONFIG_I2C_STM32F4) += i2c-stm32f4.o
+obj-$(CONFIG_I2C_STM32F7) += i2c-stm32f7.o
obj-$(CONFIG_I2C_STU300) += i2c-stu300.o
obj-$(CONFIG_I2C_SUN6I_P2WI) += i2c-sun6i-p2wi.o
obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
new file mode 100644
index 000000000000..f5e1941e65b5
--- /dev/null
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright Intel Corporation (C) 2017.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Based on the i2c-axxia.c driver.
+ */
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#define ALTR_I2C_TFR_CMD 0x00 /* Transfer Command register */
+#define ALTR_I2C_TFR_CMD_STA BIT(9) /* send START before byte */
+#define ALTR_I2C_TFR_CMD_STO BIT(8) /* send STOP after byte */
+#define ALTR_I2C_TFR_CMD_RW_D BIT(0) /* Direction of transfer */
+#define ALTR_I2C_RX_DATA 0x04 /* RX data FIFO register */
+#define ALTR_I2C_CTRL 0x08 /* Control register */
+#define ALTR_I2C_CTRL_RXT_SHFT 4 /* RX FIFO Threshold */
+#define ALTR_I2C_CTRL_TCT_SHFT 2 /* TFER CMD FIFO Threshold */
+#define ALTR_I2C_CTRL_BSPEED BIT(1) /* Bus Speed (1=Fast) */
+#define ALTR_I2C_CTRL_EN BIT(0) /* Enable Core (1=Enable) */
+#define ALTR_I2C_ISER 0x0C /* Interrupt Status Enable register */
+#define ALTR_I2C_ISER_RXOF_EN BIT(4) /* Enable RX OVERFLOW IRQ */
+#define ALTR_I2C_ISER_ARB_EN BIT(3) /* Enable ARB LOST IRQ */
+#define ALTR_I2C_ISER_NACK_EN BIT(2) /* Enable NACK DET IRQ */
+#define ALTR_I2C_ISER_RXRDY_EN BIT(1) /* Enable RX Ready IRQ */
+#define ALTR_I2C_ISER_TXRDY_EN BIT(0) /* Enable TX Ready IRQ */
+#define ALTR_I2C_ISR 0x10 /* Interrupt Status register */
+#define ALTR_I2C_ISR_RXOF BIT(4) /* RX OVERFLOW IRQ */
+#define ALTR_I2C_ISR_ARB BIT(3) /* ARB LOST IRQ */
+#define ALTR_I2C_ISR_NACK BIT(2) /* NACK DET IRQ */
+#define ALTR_I2C_ISR_RXRDY BIT(1) /* RX Ready IRQ */
+#define ALTR_I2C_ISR_TXRDY BIT(0) /* TX Ready IRQ */
+#define ALTR_I2C_STATUS 0x14 /* Status register */
+#define ALTR_I2C_STAT_CORE BIT(0) /* Core Status (0=idle) */
+#define ALTR_I2C_TC_FIFO_LVL 0x18 /* Transfer FIFO LVL register */
+#define ALTR_I2C_RX_FIFO_LVL 0x1C /* Receive FIFO LVL register */
+#define ALTR_I2C_SCL_LOW 0x20 /* SCL low count register */
+#define ALTR_I2C_SCL_HIGH 0x24 /* SCL high count register */
+#define ALTR_I2C_SDA_HOLD 0x28 /* SDA hold count register */
+
+#define ALTR_I2C_ALL_IRQ (ALTR_I2C_ISR_RXOF | ALTR_I2C_ISR_ARB | \
+ ALTR_I2C_ISR_NACK | ALTR_I2C_ISR_RXRDY | \
+ ALTR_I2C_ISR_TXRDY)
+
+#define ALTR_I2C_THRESHOLD 0 /* IRQ Threshold at 1 element */
+#define ALTR_I2C_DFLT_FIFO_SZ 4
+#define ALTR_I2C_TIMEOUT 100000 /* 100ms */
+#define ALTR_I2C_XFER_TIMEOUT (msecs_to_jiffies(250))
+
+/**
+ * altr_i2c_dev - I2C device context
+ * @base: pointer to register struct
+ * @msg: pointer to current message
+ * @msg_len: number of bytes transferred in msg
+ * @msg_err: error code for completed message
+ * @msg_complete: xfer completion object
+ * @dev: device reference
+ * @adapter: core i2c abstraction
+ * @i2c_clk: clock reference for i2c input clock
+ * @bus_clk_rate: current i2c bus clock rate
+ * @buf: ptr to msg buffer for easier use.
+ * @fifo_size: size of the FIFO passed in.
+ * @isr_mask: cached copy of local ISR enables.
+ * @isr_status: cached copy of local ISR status.
+ * @lock: spinlock for IRQ synchronization.
+ */
+struct altr_i2c_dev {
+ void __iomem *base;
+ struct i2c_msg *msg;
+ size_t msg_len;
+ int msg_err;
+ struct completion msg_complete;
+ struct device *dev;
+ struct i2c_adapter adapter;
+ struct clk *i2c_clk;
+ u32 bus_clk_rate;
+ u8 *buf;
+ u32 fifo_size;
+ u32 isr_mask;
+ u32 isr_status;
+ spinlock_t lock; /* IRQ synchronization */
+};
+
+static void
+altr_i2c_int_enable(struct altr_i2c_dev *idev, u32 mask, bool enable)
+{
+ unsigned long flags;
+ u32 int_en;
+
+ spin_lock_irqsave(&idev->lock, flags);
+
+ int_en = readl(idev->base + ALTR_I2C_ISER);
+ if (enable)
+ idev->isr_mask = int_en | mask;
+ else
+ idev->isr_mask = int_en & ~mask;
+
+ writel(idev->isr_mask, idev->base + ALTR_I2C_ISER);
+
+ spin_unlock_irqrestore(&idev->lock, flags);
+}
+
+static void altr_i2c_int_clear(struct altr_i2c_dev *idev, u32 mask)
+{
+ u32 int_en = readl(idev->base + ALTR_I2C_ISR);
+
+ writel(int_en | mask, idev->base + ALTR_I2C_ISR);
+}
+
+static void altr_i2c_core_disable(struct altr_i2c_dev *idev)
+{
+ u32 tmp = readl(idev->base + ALTR_I2C_CTRL);
+
+ writel(tmp & ~ALTR_I2C_CTRL_EN, idev->base + ALTR_I2C_CTRL);
+}
+
+static void altr_i2c_core_enable(struct altr_i2c_dev *idev)
+{
+ u32 tmp = readl(idev->base + ALTR_I2C_CTRL);
+
+ writel(tmp | ALTR_I2C_CTRL_EN, idev->base + ALTR_I2C_CTRL);
+}
+
+static void altr_i2c_reset(struct altr_i2c_dev *idev)
+{
+ altr_i2c_core_disable(idev);
+ altr_i2c_core_enable(idev);
+}
+
+static inline void altr_i2c_stop(struct altr_i2c_dev *idev)
+{
+ writel(ALTR_I2C_TFR_CMD_STO, idev->base + ALTR_I2C_TFR_CMD);
+}
+
+static void altr_i2c_init(struct altr_i2c_dev *idev)
+{
+ u32 divisor = clk_get_rate(idev->i2c_clk) / idev->bus_clk_rate;
+ u32 clk_mhz = clk_get_rate(idev->i2c_clk) / 1000000;
+ u32 tmp = (ALTR_I2C_THRESHOLD << ALTR_I2C_CTRL_RXT_SHFT) |
+ (ALTR_I2C_THRESHOLD << ALTR_I2C_CTRL_TCT_SHFT);
+ u32 t_high, t_low;
+
+ if (idev->bus_clk_rate <= 100000) {
+ tmp &= ~ALTR_I2C_CTRL_BSPEED;
+ /* Standard mode SCL 50/50 */
+ t_high = divisor * 1 / 2;
+ t_low = divisor * 1 / 2;
+ } else {
+ tmp |= ALTR_I2C_CTRL_BSPEED;
+ /* Fast mode SCL 33/66 */
+ t_high = divisor * 1 / 3;
+ t_low = divisor * 2 / 3;
+ }
+ writel(tmp, idev->base + ALTR_I2C_CTRL);
+
+ dev_dbg(idev->dev, "rate=%uHz per_clk=%uMHz -> ratio=1:%u\n",
+ idev->bus_clk_rate, clk_mhz, divisor);
+
+ /* Reset controller */
+ altr_i2c_reset(idev);
+
+ /* SCL High Time */
+ writel(t_high, idev->base + ALTR_I2C_SCL_HIGH);
+ /* SCL Low Time */
+ writel(t_low, idev->base + ALTR_I2C_SCL_LOW);
+ /* SDA Hold Time, 300ns */
+ writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD);
+
+ /* Mask all master interrupt bits */
+ altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
+}
+
+/**
+ * altr_i2c_transfer - On the last byte to be transmitted, send
+ * a Stop bit on the last byte.
+ */
+static void altr_i2c_transfer(struct altr_i2c_dev *idev, u32 data)
+{
+ /* On the last byte to be transmitted, send STOP */
+ if (idev->msg_len == 1)
+ data |= ALTR_I2C_TFR_CMD_STO;
+ if (idev->msg_len > 0)
+ writel(data, idev->base + ALTR_I2C_TFR_CMD);
+}
+
+/**
+ * altr_i2c_empty_rx_fifo - Fetch data from RX FIFO until end of
+ * transfer. Send a Stop bit on the last byte.
+ */
+static void altr_i2c_empty_rx_fifo(struct altr_i2c_dev *idev)
+{
+ size_t rx_fifo_avail = readl(idev->base + ALTR_I2C_RX_FIFO_LVL);
+ int bytes_to_transfer = min(rx_fifo_avail, idev->msg_len);
+
+ while (bytes_to_transfer-- > 0) {
+ *idev->buf++ = readl(idev->base + ALTR_I2C_RX_DATA);
+ idev->msg_len--;
+ altr_i2c_transfer(idev, 0);
+ }
+}
+
+/**
+ * altr_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer.
+ * @return: Number of bytes left to transfer.
+ */
+static int altr_i2c_fill_tx_fifo(struct altr_i2c_dev *idev)
+{
+ size_t tx_fifo_avail = idev->fifo_size - readl(idev->base +
+ ALTR_I2C_TC_FIFO_LVL);
+ int bytes_to_transfer = min(tx_fifo_avail, idev->msg_len);
+ int ret = idev->msg_len - bytes_to_transfer;
+
+ while (bytes_to_transfer-- > 0) {
+ altr_i2c_transfer(idev, *idev->buf++);
+ idev->msg_len--;
+ }
+
+ return ret;
+}
+
+static irqreturn_t altr_i2c_isr_quick(int irq, void *_dev)
+{
+ struct altr_i2c_dev *idev = _dev;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ /* Read IRQ status but only interested in Enabled IRQs. */
+ idev->isr_status = readl(idev->base + ALTR_I2C_ISR) & idev->isr_mask;
+ if (idev->isr_status)
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
+static irqreturn_t altr_i2c_isr(int irq, void *_dev)
+{
+ int ret;
+ bool read, finish = false;
+ struct altr_i2c_dev *idev = _dev;
+ u32 status = idev->isr_status;
+
+ if (!idev->msg) {
+ dev_warn(idev->dev, "unexpected interrupt\n");
+ altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ);
+ return IRQ_HANDLED;
+ }
+ read = (idev->msg->flags & I2C_M_RD) != 0;
+
+ /* handle Lost Arbitration */
+ if (unlikely(status & ALTR_I2C_ISR_ARB)) {
+ altr_i2c_int_clear(idev, ALTR_I2C_ISR_ARB);
+ idev->msg_err = -EAGAIN;
+ finish = true;
+ } else if (unlikely(status & ALTR_I2C_ISR_NACK)) {
+ dev_dbg(idev->dev, "Could not get ACK\n");
+ idev->msg_err = -ENXIO;
+ altr_i2c_int_clear(idev, ALTR_I2C_ISR_NACK);
+ altr_i2c_stop(idev);
+ finish = true;
+ } else if (read && unlikely(status & ALTR_I2C_ISR_RXOF)) {
+ /* handle RX FIFO Overflow */
+ altr_i2c_empty_rx_fifo(idev);
+ altr_i2c_int_clear(idev, ALTR_I2C_ISR_RXRDY);
+ altr_i2c_stop(idev);
+ dev_err(idev->dev, "RX FIFO Overflow\n");
+ finish = true;
+ } else if (read && (status & ALTR_I2C_ISR_RXRDY)) {
+ /* RX FIFO needs service? */
+ altr_i2c_empty_rx_fifo(idev);
+ altr_i2c_int_clear(idev, ALTR_I2C_ISR_RXRDY);
+ if (!idev->msg_len)
+ finish = true;
+ } else if (!read && (status & ALTR_I2C_ISR_TXRDY)) {
+ /* TX FIFO needs service? */
+ altr_i2c_int_clear(idev, ALTR_I2C_ISR_TXRDY);
+ if (idev->msg_len > 0)
+ altr_i2c_fill_tx_fifo(idev);
+ else
+ finish = true;
+ } else {
+ dev_warn(idev->dev, "Unexpected interrupt: 0x%x\n", status);
+ altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ);
+ }
+
+ if (finish) {
+ /* Wait for the Core to finish */
+ ret = readl_poll_timeout_atomic(idev->base + ALTR_I2C_STATUS,
+ status,
+ !(status & ALTR_I2C_STAT_CORE),
+ 1, ALTR_I2C_TIMEOUT);
+ if (ret)
+ dev_err(idev->dev, "message timeout\n");
+ altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
+ altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ);
+ complete(&idev->msg_complete);
+ dev_dbg(idev->dev, "Message Complete\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
+{
+ u32 imask = ALTR_I2C_ISR_RXOF | ALTR_I2C_ISR_ARB | ALTR_I2C_ISR_NACK;
+ unsigned long time_left;
+ u32 value;
+ u8 addr = i2c_8bit_addr_from_msg(msg);
+
+ idev->msg = msg;
+ idev->msg_len = msg->len;
+ idev->buf = msg->buf;
+ idev->msg_err = 0;
+ reinit_completion(&idev->msg_complete);
+ altr_i2c_core_enable(idev);
+
+ /* Make sure RX FIFO is empty */
+ do {
+ readl(idev->base + ALTR_I2C_RX_DATA);
+ } while (readl(idev->base + ALTR_I2C_RX_FIFO_LVL));
+
+ writel(ALTR_I2C_TFR_CMD_STA | addr, idev->base + ALTR_I2C_TFR_CMD);
+
+ if ((msg->flags & I2C_M_RD) != 0) {
+ imask |= ALTR_I2C_ISER_RXOF_EN | ALTR_I2C_ISER_RXRDY_EN;
+ altr_i2c_int_enable(idev, imask, true);
+ /* write the first byte to start the RX */
+ altr_i2c_transfer(idev, 0);
+ } else {
+ imask |= ALTR_I2C_ISR_TXRDY;
+ altr_i2c_int_enable(idev, imask, true);
+ altr_i2c_fill_tx_fifo(idev);
+ }
+
+ time_left = wait_for_completion_timeout(&idev->msg_complete,
+ ALTR_I2C_XFER_TIMEOUT);
+ altr_i2c_int_enable(idev, imask, false);
+
+ value = readl(idev->base + ALTR_I2C_STATUS) & ALTR_I2C_STAT_CORE;
+ if (value)
+ dev_err(idev->dev, "Core Status not IDLE...\n");
+
+ if (time_left == 0) {
+ idev->msg_err = -ETIMEDOUT;
+ dev_dbg(idev->dev, "Transaction timed out.\n");
+ }
+
+ altr_i2c_core_disable(idev);
+
+ return idev->msg_err;
+}
+
+static int
+altr_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct altr_i2c_dev *idev = i2c_get_adapdata(adap);
+ int i, ret;
+
+ for (i = 0; i < num; i++) {
+ ret = altr_i2c_xfer_msg(idev, msgs++);
+ if (ret)
+ return ret;
+ }
+ return num;
+}
+
+static u32 altr_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm altr_i2c_algo = {
+ .master_xfer = altr_i2c_xfer,
+ .functionality = altr_i2c_func,
+};
+
+static int altr_i2c_probe(struct platform_device *pdev)
+{
+ struct altr_i2c_dev *idev = NULL;
+ struct resource *res;
+ int irq, ret;
+ u32 val;
+
+ idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
+ if (!idev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ idev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(idev->base))
+ return PTR_ERR(idev->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "missing interrupt resource\n");
+ return irq;
+ }
+
+ idev->i2c_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(idev->i2c_clk)) {
+ dev_err(&pdev->dev, "missing clock\n");
+ return PTR_ERR(idev->i2c_clk);
+ }
+
+ idev->dev = &pdev->dev;
+ init_completion(&idev->msg_complete);
+ spin_lock_init(&idev->lock);
+
+ val = device_property_read_u32(idev->dev, "fifo-size",
+ &idev->fifo_size);
+ if (val) {
+ dev_err(&pdev->dev, "FIFO size set to default of %d\n",
+ ALTR_I2C_DFLT_FIFO_SZ);
+ idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ;
+ }
+
+ val = device_property_read_u32(idev->dev, "clock-frequency",
+ &idev->bus_clk_rate);
+ if (val) {
+ dev_err(&pdev->dev, "Default to 100kHz\n");
+ idev->bus_clk_rate = 100000; /* default clock rate */
+ }
+
+ if (idev->bus_clk_rate > 400000) {
+ dev_err(&pdev->dev, "invalid clock-frequency %d\n",
+ idev->bus_clk_rate);
+ return -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, altr_i2c_isr_quick,
+ altr_i2c_isr, IRQF_ONESHOT,
+ pdev->name, idev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to claim IRQ %d\n", irq);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(idev->i2c_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable clock\n");
+ return ret;
+ }
+
+ altr_i2c_init(idev);
+
+ i2c_set_adapdata(&idev->adapter, idev);
+ strlcpy(idev->adapter.name, pdev->name, sizeof(idev->adapter.name));
+ idev->adapter.owner = THIS_MODULE;
+ idev->adapter.algo = &altr_i2c_algo;
+ idev->adapter.dev.parent = &pdev->dev;
+ idev->adapter.dev.of_node = pdev->dev.of_node;
+
+ platform_set_drvdata(pdev, idev);
+
+ ret = i2c_add_adapter(&idev->adapter);
+ if (ret) {
+ clk_disable_unprepare(idev->i2c_clk);
+ return ret;
+ }
+ dev_info(&pdev->dev, "Altera SoftIP I2C Probe Complete\n");
+
+ return 0;
+}
+
+static int altr_i2c_remove(struct platform_device *pdev)
+{
+ struct altr_i2c_dev *idev = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(idev->i2c_clk);
+ i2c_del_adapter(&idev->adapter);
+
+ return 0;
+}
+
+/* Match table for of_platform binding */
+static const struct of_device_id altr_i2c_of_match[] = {
+ { .compatible = "altr,softip-i2c-v1.0" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altr_i2c_of_match);
+
+static struct platform_driver altr_i2c_driver = {
+ .probe = altr_i2c_probe,
+ .remove = altr_i2c_remove,
+ .driver = {
+ .name = "altera-i2c",
+ .of_match_table = altr_i2c_of_match,
+ },
+};
+
+module_platform_driver(altr_i2c_driver);
+
+MODULE_DESCRIPTION("Altera Soft IP I2C bus driver");
+MODULE_AUTHOR("Thor Thayer <thor.thayer@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
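
altr_i2c_init() above derives all bus timing from two inputs, the peripheral clock and the requested bus rate. For a hypothetical 100 MHz clock driving a 400 kHz fast-mode bus, divisor = 250, the SCL high/low counts come out to 83/166 (the 33/66 split), and the 300 ns SDA hold becomes 30 clock cycles. A stand-alone sketch of that arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative numbers: 100 MHz peripheral clock, 400 kHz fast mode. */
	uint32_t clk_hz = 100000000;
	uint32_t bus_hz = 400000;

	uint32_t divisor = clk_hz / bus_hz;
	uint32_t clk_mhz = clk_hz / 1000000;
	uint32_t t_high, t_low;

	if (bus_hz <= 100000) {		/* standard mode: 50/50 duty cycle */
		t_high = divisor / 2;
		t_low = divisor / 2;
	} else {			/* fast mode: 33/66 duty cycle */
		t_high = divisor / 3;
		t_low = divisor * 2 / 3;
	}

	printf("divisor=%u t_high=%u t_low=%u sda_hold=%u\n",
	       divisor, t_high, t_low, 300 * clk_mhz / 1000);	/* 250 83 166 30 */
	return 0;
}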
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h
new file mode 100644
index 000000000000..dab51761f8c5
--- /dev/null
+++ b/drivers/i2c/busses/i2c-stm32.h
@@ -0,0 +1,20 @@
+/*
+ * i2c-stm32.h
+ *
+ * Copyright (C) M'boumba Cedric Madianga 2017
+ * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _I2C_STM32_H
+#define _I2C_STM32_H
+
+enum stm32_i2c_speed {
+ STM32_I2C_SPEED_STANDARD, /* 100 kHz */
+ STM32_I2C_SPEED_FAST, /* 400 kHz */
+ STM32_I2C_SPEED_FAST_PLUS, /* 1 MHz */
+ STM32_I2C_SPEED_END,
+};
+
+#endif /* _I2C_STM32_H */
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index aceb6f788564..4ec108496f15 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -27,6 +27,8 @@
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include "i2c-stm32.h"
+
/* STM32F4 I2C offset registers */
#define STM32F4_I2C_CR1 0x00
#define STM32F4_I2C_CR2 0x04
@@ -90,12 +92,6 @@
#define STM32F4_I2C_MAX_FREQ 46U
#define HZ_TO_MHZ 1000000
-enum stm32f4_i2c_speed {
- STM32F4_I2C_SPEED_STANDARD, /* 100 kHz */
- STM32F4_I2C_SPEED_FAST, /* 400 kHz */
- STM32F4_I2C_SPEED_END,
-};
-
/**
* struct stm32f4_i2c_msg - client specific data
* @addr: 8-bit slave addr, including r/w bit
@@ -159,7 +155,7 @@ static int stm32f4_i2c_set_periph_clk_freq(struct stm32f4_i2c_dev *i2c_dev)
i2c_dev->parent_rate = clk_get_rate(i2c_dev->clk);
freq = DIV_ROUND_UP(i2c_dev->parent_rate, HZ_TO_MHZ);
- if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) {
+ if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) {
/*
* To reach 100 kHz, the parent clk frequency should be between
* a minimum value of 2 MHz and a maximum value of 46 MHz due
@@ -216,7 +212,7 @@ static void stm32f4_i2c_set_rise_time(struct stm32f4_i2c_dev *i2c_dev)
* is not higher than 46 MHz . As a result trise is at most 4 bits wide
* and so fits into the TRISE bits [5:0].
*/
- if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD)
+ if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD)
trise = freq + 1;
else
trise = freq * 3 / 10 + 1;
@@ -230,7 +226,7 @@ static void stm32f4_i2c_set_speed_mode(struct stm32f4_i2c_dev *i2c_dev)
u32 val;
u32 ccr = 0;
- if (i2c_dev->speed == STM32F4_I2C_SPEED_STANDARD) {
+ if (i2c_dev->speed == STM32_I2C_SPEED_STANDARD) {
/*
* In standard mode:
* t_scl_high = t_scl_low = CCR * I2C parent clk period
@@ -808,10 +804,10 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
udelay(2);
reset_control_deassert(rst);
- i2c_dev->speed = STM32F4_I2C_SPEED_STANDARD;
+ i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
ret = of_property_read_u32(np, "clock-frequency", &clk_rate);
if (!ret && clk_rate >= 400000)
- i2c_dev->speed = STM32F4_I2C_SPEED_FAST;
+ i2c_dev->speed = STM32_I2C_SPEED_FAST;
i2c_dev->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
new file mode 100644
index 000000000000..47c67b0ca896
--- /dev/null
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -0,0 +1,972 @@
+/*
+ * Driver for STMicroelectronics STM32F7 I2C controller
+ *
+ * This I2C controller is described in the STM32F75xxx and STM32F74xxx Soc
+ * reference manual.
+ * Please see below a link to the documentation:
+ * http://www.st.com/resource/en/reference_manual/dm00124865.pdf
+ *
+ * Copyright (C) M'boumba Cedric Madianga 2017
+ * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
+ *
+ * This driver is based on i2c-stm32f4.c
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "i2c-stm32.h"
+
+/* STM32F7 I2C registers */
+#define STM32F7_I2C_CR1 0x00
+#define STM32F7_I2C_CR2 0x04
+#define STM32F7_I2C_TIMINGR 0x10
+#define STM32F7_I2C_ISR 0x18
+#define STM32F7_I2C_ICR 0x1C
+#define STM32F7_I2C_RXDR 0x24
+#define STM32F7_I2C_TXDR 0x28
+
+/* STM32F7 I2C control 1 */
+#define STM32F7_I2C_CR1_ANFOFF BIT(12)
+#define STM32F7_I2C_CR1_ERRIE BIT(7)
+#define STM32F7_I2C_CR1_TCIE BIT(6)
+#define STM32F7_I2C_CR1_STOPIE BIT(5)
+#define STM32F7_I2C_CR1_NACKIE BIT(4)
+#define STM32F7_I2C_CR1_ADDRIE BIT(3)
+#define STM32F7_I2C_CR1_RXIE BIT(2)
+#define STM32F7_I2C_CR1_TXIE BIT(1)
+#define STM32F7_I2C_CR1_PE BIT(0)
+#define STM32F7_I2C_ALL_IRQ_MASK (STM32F7_I2C_CR1_ERRIE \
+ | STM32F7_I2C_CR1_TCIE \
+ | STM32F7_I2C_CR1_STOPIE \
+ | STM32F7_I2C_CR1_NACKIE \
+ | STM32F7_I2C_CR1_RXIE \
+ | STM32F7_I2C_CR1_TXIE)
+
+/* STM32F7 I2C control 2 */
+#define STM32F7_I2C_CR2_RELOAD BIT(24)
+#define STM32F7_I2C_CR2_NBYTES_MASK GENMASK(23, 16)
+#define STM32F7_I2C_CR2_NBYTES(n) (((n) & 0xff) << 16)
+#define STM32F7_I2C_CR2_NACK BIT(15)
+#define STM32F7_I2C_CR2_STOP BIT(14)
+#define STM32F7_I2C_CR2_START BIT(13)
+#define STM32F7_I2C_CR2_RD_WRN BIT(10)
+#define STM32F7_I2C_CR2_SADD7_MASK GENMASK(7, 1)
+#define STM32F7_I2C_CR2_SADD7(n) (((n) & 0x7f) << 1)
+
+/* STM32F7 I2C Interrupt Status */
+#define STM32F7_I2C_ISR_BUSY BIT(15)
+#define STM32F7_I2C_ISR_ARLO BIT(9)
+#define STM32F7_I2C_ISR_BERR BIT(8)
+#define STM32F7_I2C_ISR_TCR BIT(7)
+#define STM32F7_I2C_ISR_TC BIT(6)
+#define STM32F7_I2C_ISR_STOPF BIT(5)
+#define STM32F7_I2C_ISR_NACKF BIT(4)
+#define STM32F7_I2C_ISR_RXNE BIT(2)
+#define STM32F7_I2C_ISR_TXIS BIT(1)
+
+/* STM32F7 I2C Interrupt Clear */
+#define STM32F7_I2C_ICR_ARLOCF BIT(9)
+#define STM32F7_I2C_ICR_BERRCF BIT(8)
+#define STM32F7_I2C_ICR_STOPCF BIT(5)
+#define STM32F7_I2C_ICR_NACKCF BIT(4)
+
+/* STM32F7 I2C Timing */
+#define STM32F7_I2C_TIMINGR_PRESC(n) (((n) & 0xf) << 28)
+#define STM32F7_I2C_TIMINGR_SCLDEL(n) (((n) & 0xf) << 20)
+#define STM32F7_I2C_TIMINGR_SDADEL(n) (((n) & 0xf) << 16)
+#define STM32F7_I2C_TIMINGR_SCLH(n) (((n) & 0xff) << 8)
+#define STM32F7_I2C_TIMINGR_SCLL(n) ((n) & 0xff)
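+
+/*
+ * All TIMINGR fields except PRESC are expressed in units of the prescaled
+ * I2C kernel clock period t_presc = (PRESC + 1) * t_i2cclk: the SCL low and
+ * high phases last (SCLL + 1) * t_presc and (SCLH + 1) * t_presc, while
+ * SDADEL and SCLDEL set the data hold and data setup delays. See the SoC
+ * reference manual for the exact register description.
+ */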
+
+#define STM32F7_I2C_MAX_LEN 0xff
+
+#define STM32F7_I2C_DNF_DEFAULT 0
+#define STM32F7_I2C_DNF_MAX 16
+
+#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
+#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
+#define STM32F7_I2C_ANALOG_FILTER_DELAY_MAX 260 /* ns */
+
+#define STM32F7_I2C_RISE_TIME_DEFAULT 25 /* ns */
+#define STM32F7_I2C_FALL_TIME_DEFAULT 10 /* ns */
+
+#define STM32F7_PRESC_MAX BIT(4)
+#define STM32F7_SCLDEL_MAX BIT(4)
+#define STM32F7_SDADEL_MAX BIT(4)
+#define STM32F7_SCLH_MAX BIT(8)
+#define STM32F7_SCLL_MAX BIT(8)
+
+/**
+ * struct stm32f7_i2c_spec - private i2c specification timing
+ * @rate: I2C bus speed (Hz)
+ * @rate_min: 80% of I2C bus speed (Hz)
+ * @rate_max: 100% of I2C bus speed (Hz)
+ * @fall_max: Max fall time of both SDA and SCL signals (ns)
+ * @rise_max: Max rise time of both SDA and SCL signals (ns)
+ * @hddat_min: Min data hold time (ns)
+ * @vddat_max: Max data valid time (ns)
+ * @sudat_min: Min data setup time (ns)
+ * @l_min: Min low period of the SCL clock (ns)
+ * @h_min: Min high period of the SCL clock (ns)
+ */
+struct stm32f7_i2c_spec {
+ u32 rate;
+ u32 rate_min;
+ u32 rate_max;
+ u32 fall_max;
+ u32 rise_max;
+ u32 hddat_min;
+ u32 vddat_max;
+ u32 sudat_min;
+ u32 l_min;
+ u32 h_min;
+};
+
+/**
+ * struct stm32f7_i2c_setup - private I2C timing setup parameters
+ * @speed: I2C speed mode (Standard, Fast, Fast Plus)
+ * @speed_freq: I2C speed frequency (Hz)
+ * @clock_src: I2C clock source frequency (Hz)
+ * @rise_time: Rise time (ns)
+ * @fall_time: Fall time (ns)
+ * @dnf: Digital filter coefficient (0-16)
+ * @analog_filter: Analog filter on/off
+ */
+struct stm32f7_i2c_setup {
+ enum stm32_i2c_speed speed;
+ u32 speed_freq;
+ u32 clock_src;
+ u32 rise_time;
+ u32 fall_time;
+ u8 dnf;
+ bool analog_filter;
+};
+
+/**
+ * struct stm32f7_i2c_timings - private I2C output parameters
+ * @node: list entry used while searching for a timing solution
+ * @presc: Prescaler value
+ * @scldel: Data setup time
+ * @sdadel: Data hold time
+ * @sclh: SCL high period (master mode)
+ * @scll: SCL low period (master mode)
+ */
+struct stm32f7_i2c_timings {
+ struct list_head node;
+ u8 presc;
+ u8 scldel;
+ u8 sdadel;
+ u8 sclh;
+ u8 scll;
+};
+
+/**
+ * struct stm32f7_i2c_msg - client specific data
+ * @addr: 7-bit slave address
+ * @count: number of bytes to be transferred
+ * @buf: data buffer
+ * @result: result of the transfer
+ * @stop: last I2C msg to be sent, i.e. STOP to be generated
+ */
+struct stm32f7_i2c_msg {
+ u8 addr;
+ u32 count;
+ u8 *buf;
+ int result;
+ bool stop;
+};
+
+/**
+ * struct stm32f7_i2c_dev - private data of the controller
+ * @adap: I2C adapter for this controller
+ * @dev: device for this controller
+ * @base: virtual memory area
+ * @complete: completion of I2C message
+ * @clk: hw i2c clock
+ * @speed: I2C speed mode of the controller (Standard, Fast or Fast Plus)
+ * @msg: pointer to the I2C messages being processed
+ * @msg_num: number of I2C messages to be executed
+ * @msg_id: message identifier
+ * @f7_msg: customized i2c msg for driver usage
+ * @setup: I2C timing input setup
+ * @timing: I2C computed timings
+ */
+struct stm32f7_i2c_dev {
+ struct i2c_adapter adap;
+ struct device *dev;
+ void __iomem *base;
+ struct completion complete;
+ struct clk *clk;
+ int speed;
+ struct i2c_msg *msg;
+ unsigned int msg_num;
+ unsigned int msg_id;
+ struct stm32f7_i2c_msg f7_msg;
+ struct stm32f7_i2c_setup setup;
+ struct stm32f7_i2c_timings timing;
+};
+
+/*
+ * All these values come from the I2C-bus Specification, Version 6.0,
+ * 4 April 2014, Table 10: "Characteristics of the SDA and SCL bus lines for
+ * Standard, Fast, and Fast-mode Plus I2C-bus devices".
+ */
+static struct stm32f7_i2c_spec i2c_specs[] = {
+ [STM32_I2C_SPEED_STANDARD] = {
+ .rate = 100000,
+ .rate_min = 80000,
+ .rate_max = 100000,
+ .fall_max = 300,
+ .rise_max = 1000,
+ .hddat_min = 0,
+ .vddat_max = 3450,
+ .sudat_min = 250,
+ .l_min = 4700,
+ .h_min = 4000,
+ },
+ [STM32_I2C_SPEED_FAST] = {
+ .rate = 400000,
+ .rate_min = 320000,
+ .rate_max = 400000,
+ .fall_max = 300,
+ .rise_max = 300,
+ .hddat_min = 0,
+ .vddat_max = 900,
+ .sudat_min = 100,
+ .l_min = 1300,
+ .h_min = 600,
+ },
+ [STM32_I2C_SPEED_FAST_PLUS] = {
+ .rate = 1000000,
+ .rate_min = 800000,
+ .rate_max = 1000000,
+ .fall_max = 100,
+ .rise_max = 120,
+ .hddat_min = 0,
+ .vddat_max = 450,
+ .sudat_min = 50,
+ .l_min = 500,
+ .h_min = 260,
+ },
+};
+
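+/*
+ * Default timing setup. The rise and fall times below may be overridden at
+ * probe time through the "i2c-scl-rising-time-ns" and
+ * "i2c-scl-falling-time-ns" device tree properties.
+ */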
+static struct stm32f7_i2c_setup stm32f7_setup = {
+ .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
+ .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
+ .dnf = STM32F7_I2C_DNF_DEFAULT,
+ .analog_filter = STM32F7_I2C_ANALOG_FILTER_ENABLE,
+};
+
+static inline void stm32f7_i2c_set_bits(void __iomem *reg, u32 mask)
+{
+ writel_relaxed(readl_relaxed(reg) | mask, reg);
+}
+
+static inline void stm32f7_i2c_clr_bits(void __iomem *reg, u32 mask)
+{
+ writel_relaxed(readl_relaxed(reg) & ~mask, reg);
+}
+
+static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev,
+ struct stm32f7_i2c_setup *setup,
+ struct stm32f7_i2c_timings *output)
+{
+ u32 p_prev = STM32F7_PRESC_MAX;
+ u32 i2cclk = DIV_ROUND_CLOSEST(NSEC_PER_SEC,
+ setup->clock_src);
+ u32 i2cbus = DIV_ROUND_CLOSEST(NSEC_PER_SEC,
+ setup->speed_freq);
+ u32 clk_error_prev = i2cbus;
+ u32 tsync;
+ u32 af_delay_min, af_delay_max;
+ u32 dnf_delay;
+ u32 clk_min, clk_max;
+ int sdadel_min, sdadel_max;
+ int scldel_min;
+ struct stm32f7_i2c_timings *v, *_v, *s;
+ struct list_head solutions;
+ u16 p, l, a, h;
+ int ret = 0;
+
+ if (setup->speed >= STM32_I2C_SPEED_END) {
+ dev_err(i2c_dev->dev, "speed out of bound {%d/%d}\n",
+ setup->speed, STM32_I2C_SPEED_END - 1);
+ return -EINVAL;
+ }
+
+ if ((setup->rise_time > i2c_specs[setup->speed].rise_max) ||
+ (setup->fall_time > i2c_specs[setup->speed].fall_max)) {
+ dev_err(i2c_dev->dev,
+ "timings out of bound Rise{%d>%d}/Fall{%d>%d}\n",
+ setup->rise_time, i2c_specs[setup->speed].rise_max,
+ setup->fall_time, i2c_specs[setup->speed].fall_max);
+ return -EINVAL;
+ }
+
+ if (setup->dnf > STM32F7_I2C_DNF_MAX) {
+ dev_err(i2c_dev->dev,
+ "DNF out of bound %d/%d\n",
+ setup->dnf, STM32F7_I2C_DNF_MAX);
+ return -EINVAL;
+ }
+
+ if (setup->speed_freq > i2c_specs[setup->speed].rate) {
+ dev_err(i2c_dev->dev, "ERROR: Freq {%d/%d}\n",
+ setup->speed_freq, i2c_specs[setup->speed].rate);
+ return -EINVAL;
+ }
+
+ /* Analog and Digital Filters */
+ af_delay_min =
+ (setup->analog_filter ?
+ STM32F7_I2C_ANALOG_FILTER_DELAY_MIN : 0);
+ af_delay_max =
+ (setup->analog_filter ?
+ STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0);
+ dnf_delay = setup->dnf * i2cclk;
+
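+ /*
+ * The SDADEL window follows from the bus specification's minimum
+ * data hold time (t_HD;DAT) and maximum data valid time (t_VD;DAT),
+ * corrected for the SCL fall/rise times and for the analog and
+ * digital filter delays. SCLDEL must at least cover the SCL rise
+ * time plus the specification's minimum data setup time (t_SU;DAT).
+ */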
+ sdadel_min = setup->fall_time - i2c_specs[setup->speed].hddat_min -
+ af_delay_min - (setup->dnf + 3) * i2cclk;
+
+ sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time -
+ af_delay_max - (setup->dnf + 4) * i2cclk;
+
+ scldel_min = setup->rise_time + i2c_specs[setup->speed].sudat_min;
+
+ if (sdadel_min < 0)
+ sdadel_min = 0;
+ if (sdadel_max < 0)
+ sdadel_max = 0;
+
+ dev_dbg(i2c_dev->dev, "SDADEL(min/max): %i/%i, SCLDEL(Min): %i\n",
+ sdadel_min, sdadel_max, scldel_min);
+
+ INIT_LIST_HEAD(&solutions);
+ /* Compute possible values for PRESC, SCLDEL and SDADEL */
+ for (p = 0; p < STM32F7_PRESC_MAX; p++) {
+ for (l = 0; l < STM32F7_SCLDEL_MAX; l++) {
+ u32 scldel = (l + 1) * (p + 1) * i2cclk;
+
+ if (scldel < scldel_min)
+ continue;
+
+ for (a = 0; a < STM32F7_SDADEL_MAX; a++) {
+ u32 sdadel = (a * (p + 1) + 1) * i2cclk;
+
+ if (((sdadel >= sdadel_min) &&
+ (sdadel <= sdadel_max)) &&
+ (p != p_prev)) {
+ v = kmalloc(sizeof(*v), GFP_KERNEL);
+ if (!v) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ v->presc = p;
+ v->scldel = l;
+ v->sdadel = a;
+ p_prev = p;
+
+ list_add_tail(&v->node,
+ &solutions);
+ }
+ }
+ }
+ }
+
+ if (list_empty(&solutions)) {
+ dev_err(i2c_dev->dev, "no Prescaler solution\n");
+ ret = -EPERM;
+ goto exit;
+ }
+
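+ /*
+ * tsync models the controller's internal SCL synchronisation time
+ * added to each SCL phase: the analog and digital filter delays
+ * plus two I2C kernel clock cycles.
+ */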
+ tsync = af_delay_min + dnf_delay + (2 * i2cclk);
+ s = NULL;
+ clk_max = NSEC_PER_SEC / i2c_specs[setup->speed].rate_min;
+ clk_min = NSEC_PER_SEC / i2c_specs[setup->speed].rate_max;
+
+ /*
+ * Among the prescaler possibilities discovered above, figure out the
+ * SCL low and high periods such that:
+ * - the SCL low period is at least the minimum SCL low period
+ * defined by the I2C specification, and the I2C clock period is
+ * lower than (SCL low period - analog/digital filter delays) / 4;
+ * - the SCL high period is at least the minimum SCL high period
+ * defined by the I2C specification;
+ * - the I2C clock period is lower than the SCL high period.
+ */
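+ /*
+ * Among the candidates that satisfy the constraints above, keep the
+ * one whose resulting SCL period is closest to the requested bus
+ * period.
+ */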
+ list_for_each_entry(v, &solutions, node) {
+ u32 prescaler = (v->presc + 1) * i2cclk;
+
+ for (l = 0; l < STM32F7_SCLL_MAX; l++) {
+ u32 tscl_l = (l + 1) * prescaler + tsync;
+
+ if ((tscl_l < i2c_specs[setup->speed].l_min) ||
+ (i2cclk >=
+ ((tscl_l - af_delay_min - dnf_delay) / 4))) {
+ continue;
+ }
+
+ for (h = 0; h < STM32F7_SCLH_MAX; h++) {
+ u32 tscl_h = (h + 1) * prescaler + tsync;
+ u32 tscl = tscl_l + tscl_h +
+ setup->rise_time + setup->fall_time;
+
+ if ((tscl >= clk_min) && (tscl <= clk_max) &&
+ (tscl_h >= i2c_specs[setup->speed].h_min) &&
+ (i2cclk < tscl_h)) {
+ int clk_error = tscl - i2cbus;
+
+ if (clk_error < 0)
+ clk_error = -clk_error;
+
+ if (clk_error < clk_error_prev) {
+ clk_error_prev = clk_error;
+ v->scll = l;
+ v->sclh = h;
+ s = v;
+ }
+ }
+ }
+ }
+ }
+
+ if (!s) {
+ dev_err(i2c_dev->dev, "no solution at all\n");
+ ret = -EPERM;
+ goto exit;
+ }
+
+ output->presc = s->presc;
+ output->scldel = s->scldel;
+ output->sdadel = s->sdadel;
+ output->scll = s->scll;
+ output->sclh = s->sclh;
+
+ dev_dbg(i2c_dev->dev,
+ "Presc: %i, scldel: %i, sdadel: %i, scll: %i, sclh: %i\n",
+ output->presc,
+ output->scldel, output->sdadel,
+ output->scll, output->sclh);
+
+exit:
+ /* Release list and memory */
+ list_for_each_entry_safe(v, _v, &solutions, node) {
+ list_del(&v->node);
+ kfree(v);
+ }
+
+ return ret;
+}
+
+static int stm32f7_i2c_setup_timing(struct stm32f7_i2c_dev *i2c_dev,
+ struct stm32f7_i2c_setup *setup)
+{
+ int ret = 0;
+
+ setup->speed = i2c_dev->speed;
+ setup->speed_freq = i2c_specs[setup->speed].rate;
+ setup->clock_src = clk_get_rate(i2c_dev->clk);
+
+ if (!setup->clock_src) {
+ dev_err(i2c_dev->dev, "clock rate is 0\n");
+ return -EINVAL;
+ }
+
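+ /*
+ * If no timing solution exists at the requested speed, retry at the
+ * next lower speed mode until Standard mode has been tried.
+ */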
+ do {
+ ret = stm32f7_i2c_compute_timing(i2c_dev, setup,
+ &i2c_dev->timing);
+ if (ret) {
+ dev_err(i2c_dev->dev,
+ "failed to compute I2C timings.\n");
+ if (i2c_dev->speed > STM32_I2C_SPEED_STANDARD) {
+ i2c_dev->speed--;
+ setup->speed = i2c_dev->speed;
+ setup->speed_freq =
+ i2c_specs[setup->speed].rate;
+ dev_warn(i2c_dev->dev,
+ "downgrade I2C Speed Freq to (%i)\n",
+ i2c_specs[setup->speed].rate);
+ } else {
+ break;
+ }
+ }
+ } while (ret);
+
+ if (ret) {
+ dev_err(i2c_dev->dev, "Impossible to compute I2C timings.\n");
+ return ret;
+ }
+
+ dev_dbg(i2c_dev->dev, "I2C Speed(%i), Freq(%i), Clk Source(%i)\n",
+ setup->speed, setup->speed_freq, setup->clock_src);
+ dev_dbg(i2c_dev->dev, "I2C Rise(%i) and Fall(%i) Time\n",
+ setup->rise_time, setup->fall_time);
+ dev_dbg(i2c_dev->dev, "I2C Analog Filter(%s), DNF(%i)\n",
+ (setup->analog_filter ? "On" : "Off"), setup->dnf);
+
+ return 0;
+}
+
+static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
+{
+ struct stm32f7_i2c_timings *t = &i2c_dev->timing;
+ u32 timing = 0;
+
+ /* Timing settings */
+ timing |= STM32F7_I2C_TIMINGR_PRESC(t->presc);
+ timing |= STM32F7_I2C_TIMINGR_SCLDEL(t->scldel);
+ timing |= STM32F7_I2C_TIMINGR_SDADEL(t->sdadel);
+ timing |= STM32F7_I2C_TIMINGR_SCLH(t->sclh);
+ timing |= STM32F7_I2C_TIMINGR_SCLL(t->scll);
+ writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR);
+
+ /* Configure analog noise filter */
+ if (i2c_dev->setup.analog_filter)
+ stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_ANFOFF);
+ else
+ stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_ANFOFF);
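+
+ /* Enable the I2C controller */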
+ stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_PE);
+}
+
+static void stm32f7_i2c_write_tx_data(struct stm32f7_i2c_dev *i2c_dev)
+{
+ struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+ void __iomem *base = i2c_dev->base;
+
+ if (f7_msg->count) {
+ writeb_relaxed(*f7_msg->buf++, base + STM32F7_I2C_TXDR);
+ f7_msg->count--;
+ }
+}
+
+static void stm32f7_i2c_read_rx_data(struct stm32f7_i2c_dev *i2c_dev)
+{
+ struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+ void __iomem *base = i2c_dev->base;
+
+ if (f7_msg->count) {
+ *f7_msg->buf++ = readb_relaxed(base + STM32F7_I2C_RXDR);
+ f7_msg->count--;
+ }
+}
+
+static void stm32f7_i2c_reload(struct stm32f7_i2c_dev *i2c_dev)
+{
+ struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+ u32 cr2;
+
+ cr2 = readl_relaxed(i2c_dev->base + STM32F7_I2C_CR2);
+
+ cr2 &= ~STM32F7_I2C_CR2_NBYTES_MASK;
+ if (f7_msg->count > STM32F7_I2C_MAX_LEN) {
+ cr2 |= STM32F7_I2C_CR2_NBYTES(STM32F7_I2C_MAX_LEN);
+ } else {
+ cr2 &= ~STM32F7_I2C_CR2_RELOAD;
+ cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count);
+ }
+
+ writel_relaxed(cr2, i2c_dev->base + STM32F7_I2C_CR2);
+}
+
+static int stm32f7_i2c_wait_free_bus(struct stm32f7_i2c_dev *i2c_dev)
+{
+ u32 status;
+ int ret;
+
+ ret = readl_relaxed_poll_timeout(i2c_dev->base + STM32F7_I2C_ISR,
+ status,
+ !(status & STM32F7_I2C_ISR_BUSY),
+ 10, 1000);
+ if (ret) {
+ dev_dbg(i2c_dev->dev, "bus busy\n");
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
+ struct i2c_msg *msg)
+{
+ struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+ void __iomem *base = i2c_dev->base;
+ u32 cr1, cr2;
+
+ f7_msg->addr = msg->addr;
+ f7_msg->buf = msg->buf;
+ f7_msg->count = msg->len;
+ f7_msg->result = 0;
+ f7_msg->stop = (i2c_dev->msg_id >= i2c_dev->msg_num - 1);
+
+ reinit_completion(&i2c_dev->complete);
+
+ cr1 = readl_relaxed(base + STM32F7_I2C_CR1);
+ cr2 = readl_relaxed(base + STM32F7_I2C_CR2);
+
+ /* Set transfer direction */
+ cr2 &= ~STM32F7_I2C_CR2_RD_WRN;
+ if (msg->flags & I2C_M_RD)
+ cr2 |= STM32F7_I2C_CR2_RD_WRN;
+
+ /* Set slave address */
+ cr2 &= ~STM32F7_I2C_CR2_SADD7_MASK;
+ cr2 |= STM32F7_I2C_CR2_SADD7(f7_msg->addr);
+
+ /* Set nb bytes to transfer and reload if needed */
+ cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK | STM32F7_I2C_CR2_RELOAD);
+ if (f7_msg->count > STM32F7_I2C_MAX_LEN) {
+ cr2 |= STM32F7_I2C_CR2_NBYTES(STM32F7_I2C_MAX_LEN);
+ cr2 |= STM32F7_I2C_CR2_RELOAD;
+ } else {
+ cr2 |= STM32F7_I2C_CR2_NBYTES(f7_msg->count);
+ }
+
+ /* Enable NACK, STOP, error and transfer complete interrupts */
+ cr1 |= STM32F7_I2C_CR1_ERRIE | STM32F7_I2C_CR1_TCIE |
+ STM32F7_I2C_CR1_STOPIE | STM32F7_I2C_CR1_NACKIE;
+
+ /* Clear TX/RX interrupt */
+ cr1 &= ~(STM32F7_I2C_CR1_RXIE | STM32F7_I2C_CR1_TXIE);
+
+ /* Enable RX/TX interrupt according to msg direction */
+ if (msg->flags & I2C_M_RD)
+ cr1 |= STM32F7_I2C_CR1_RXIE;
+ else
+ cr1 |= STM32F7_I2C_CR1_TXIE;
+
+ /* Configure Start/Repeated Start */
+ cr2 |= STM32F7_I2C_CR2_START;
+
+ /* Write configurations registers */
+ writel_relaxed(cr1, base + STM32F7_I2C_CR1);
+ writel_relaxed(cr2, base + STM32F7_I2C_CR2);
+}
+
+static void stm32f7_i2c_disable_irq(struct stm32f7_i2c_dev *i2c_dev, u32 mask)
+{
+ stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, mask);
+}
+
+static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
+{
+ struct stm32f7_i2c_dev *i2c_dev = data;
+ struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+ void __iomem *base = i2c_dev->base;
+ u32 status, mask;
+
+ status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
+
+ /* Tx empty */
+ if (status & STM32F7_I2C_ISR_TXIS)
+ stm32f7_i2c_write_tx_data(i2c_dev);
+
+ /* RX not empty */
+ if (status & STM32F7_I2C_ISR_RXNE)
+ stm32f7_i2c_read_rx_data(i2c_dev);
+
+ /* NACK received */
+ if (status & STM32F7_I2C_ISR_NACKF) {
+ dev_dbg(i2c_dev->dev, "<%s>: Receive NACK\n", __func__);
+ writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
+ f7_msg->result = -ENXIO;
+ }
+
+ /* STOP detection flag */
+ if (status & STM32F7_I2C_ISR_STOPF) {
+ /* Disable interrupts */
+ stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK);
+
+ /* Clear STOP flag */
+ writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
+
+ complete(&i2c_dev->complete);
+ }
+
+ /* Transfer complete */
+ if (status & STM32F7_I2C_ISR_TC) {
+ if (f7_msg->stop) {
+ mask = STM32F7_I2C_CR2_STOP;
+ stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
+ } else {
+ i2c_dev->msg_id++;
+ i2c_dev->msg++;
+ stm32f7_i2c_xfer_msg(i2c_dev, i2c_dev->msg);
+ }
+ }
+
+ /*
+ * Transfer Complete Reload: 255 data bytes have been transferred.
+ * The I2C controller must be prepared to transfer the remaining
+ * data.
+ */
+ if (status & STM32F7_I2C_ISR_TCR)
+ stm32f7_i2c_reload(i2c_dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
+{
+ struct stm32f7_i2c_dev *i2c_dev = data;
+ struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+ void __iomem *base = i2c_dev->base;
+ struct device *dev = i2c_dev->dev;
+ u32 status;
+
+ status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
+
+ /* Bus error */
+ if (status & STM32F7_I2C_ISR_BERR) {
+ dev_err(dev, "<%s>: Bus error\n", __func__);
+ writel_relaxed(STM32F7_I2C_ICR_BERRCF, base + STM32F7_I2C_ICR);
+ f7_msg->result = -EIO;
+ }
+
+ /* Arbitration loss */
+ if (status & STM32F7_I2C_ISR_ARLO) {
+ dev_dbg(dev, "<%s>: Arbitration loss\n", __func__);
+ writel_relaxed(STM32F7_I2C_ICR_ARLOCF, base + STM32F7_I2C_ICR);
+ f7_msg->result = -EAGAIN;
+ }
+
+ stm32f7_i2c_disable_irq(i2c_dev, STM32F7_I2C_ALL_IRQ_MASK);
+
+ complete(&i2c_dev->complete);
+
+ return IRQ_HANDLED;
+}
+
+static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
+ struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+ unsigned long time_left;
+ int ret;
+
+ i2c_dev->msg = msgs;
+ i2c_dev->msg_num = num;
+ i2c_dev->msg_id = 0;
+
+ ret = clk_enable(i2c_dev->clk);
+ if (ret) {
+ dev_err(i2c_dev->dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ ret = stm32f7_i2c_wait_free_bus(i2c_dev);
+ if (ret)
+ goto clk_free;
+
+ stm32f7_i2c_xfer_msg(i2c_dev, msgs);
+
+ time_left = wait_for_completion_timeout(&i2c_dev->complete,
+ i2c_dev->adap.timeout);
+ ret = f7_msg->result;
+
+ if (!time_left) {
+ dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n",
+ i2c_dev->msg->addr);
+ ret = -ETIMEDOUT;
+ }
+
+clk_free:
+ clk_disable(i2c_dev->clk);
+
+ return (ret < 0) ? ret : num;
+}
+
+static u32 stm32f7_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm stm32f7_i2c_algo = {
+ .master_xfer = stm32f7_i2c_xfer,
+ .functionality = stm32f7_i2c_func,
+};
+
+static int stm32f7_i2c_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct stm32f7_i2c_dev *i2c_dev;
+ const struct stm32f7_i2c_setup *setup;
+ struct resource *res;
+ u32 irq_error, irq_event, clk_rate, rise_time, fall_time;
+ struct i2c_adapter *adap;
+ struct reset_control *rst;
+ int ret;
+
+ i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
+ if (!i2c_dev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ i2c_dev->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(i2c_dev->base))
+ return PTR_ERR(i2c_dev->base);
+
+ irq_event = irq_of_parse_and_map(np, 0);
+ if (!irq_event) {
+ dev_err(&pdev->dev, "IRQ event missing or invalid\n");
+ return -EINVAL;
+ }
+
+ irq_error = irq_of_parse_and_map(np, 1);
+ if (!irq_error) {
+ dev_err(&pdev->dev, "IRQ error missing or invalid\n");
+ return -EINVAL;
+ }
+
+ i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(i2c_dev->clk)) {
+ dev_err(&pdev->dev, "Error: Missing controller clock\n");
+ return PTR_ERR(i2c_dev->clk);
+ }
+ ret = clk_prepare_enable(i2c_dev->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
+
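+ /*
+ * The optional "clock-frequency" property selects the bus speed:
+ * >= 1 MHz selects Fast mode Plus, >= 400 kHz Fast mode, otherwise
+ * Standard mode (100 kHz). For instance, a node requesting Fast
+ * mode would carry (illustrative value only):
+ * clock-frequency = <400000>;
+ */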
+ i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
+ ret = device_property_read_u32(&pdev->dev, "clock-frequency",
+ &clk_rate);
+ if (!ret && clk_rate >= 1000000)
+ i2c_dev->speed = STM32_I2C_SPEED_FAST_PLUS;
+ else if (!ret && clk_rate >= 400000)
+ i2c_dev->speed = STM32_I2C_SPEED_FAST;
+ else if (!ret && clk_rate >= 100000)
+ i2c_dev->speed = STM32_I2C_SPEED_STANDARD;
+
+ rst = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(rst)) {
+ dev_err(&pdev->dev, "Error: Missing controller reset\n");
+ ret = PTR_ERR(rst);
+ goto clk_free;
+ }
+ reset_control_assert(rst);
+ udelay(2);
+ reset_control_deassert(rst);
+
+ i2c_dev->dev = &pdev->dev;
+
+ ret = devm_request_irq(&pdev->dev, irq_event, stm32f7_i2c_isr_event, 0,
+ pdev->name, i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq event %i\n",
+ irq_event);
+ goto clk_free;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq_error, stm32f7_i2c_isr_error, 0,
+ pdev->name, i2c_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq error %i\n",
+ irq_error);
+ goto clk_free;
+ }
+
+ setup = of_device_get_match_data(&pdev->dev);
+ i2c_dev->setup = *setup;
+
+ ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
+ &rise_time);
+ if (!ret)
+ i2c_dev->setup.rise_time = rise_time;
+
+ ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns",
+ &fall_time);
+ if (!ret)
+ i2c_dev->setup.fall_time = fall_time;
+
+ ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
+ if (ret)
+ goto clk_free;
+
+ stm32f7_i2c_hw_config(i2c_dev);
+
+ adap = &i2c_dev->adap;
+ i2c_set_adapdata(adap, i2c_dev);
+ snprintf(adap->name, sizeof(adap->name), "STM32F7 I2C(%pa)",
+ &res->start);
+ adap->owner = THIS_MODULE;
+ adap->timeout = 2 * HZ;
+ adap->retries = 3;
+ adap->algo = &stm32f7_i2c_algo;
+ adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
+
+ init_completion(&i2c_dev->complete);
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ goto clk_free;
+
+ platform_set_drvdata(pdev, i2c_dev);
+
+ clk_disable(i2c_dev->clk);
+
+ dev_info(i2c_dev->dev, "STM32F7 I2C-%d bus adapter\n", adap->nr);
+
+ return 0;
+
+clk_free:
+ clk_disable_unprepare(i2c_dev->clk);
+
+ return ret;
+}
+
+static int stm32f7_i2c_remove(struct platform_device *pdev)
+{
+ struct stm32f7_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+
+ i2c_del_adapter(&i2c_dev->adap);
+
+ clk_unprepare(i2c_dev->clk);
+
+ return 0;
+}
+
+static const struct of_device_id stm32f7_i2c_match[] = {
+ { .compatible = "st,stm32f7-i2c", .data = &stm32f7_setup},
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm32f7_i2c_match);
+
+static struct platform_driver stm32f7_i2c_driver = {
+ .driver = {
+ .name = "stm32f7-i2c",
+ .of_match_table = stm32f7_i2c_match,
+ },
+ .probe = stm32f7_i2c_probe,
+ .remove = stm32f7_i2c_remove,
+};
+
+module_platform_driver(stm32f7_i2c_driver);
+
+MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32F7 I2C driver");
+MODULE_LICENSE("GPL v2");